/*
 *  linux/arch/arm/kernel/ptrace.c
 *
 *  By Ross Biro 1/23/92
 *  edited by Linus Torvalds
 *  ARM modifications Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>

#define REG_PC	15
#define REG_PSR	16
/*
 * This does not yet catch signals sent when the child dies;
 * that belongs in exit.c or in signal.c.
 */

#if 0
/*
 * Breakpoint SWI instruction: SWI &9F0001
 */
#define BREAKINST_ARM	0xef9f0001
#define BREAKINST_THUMB	0xdf00		/* fill this in later */
#else
/*
 * New breakpoints - use an undefined instruction.  The ARM architecture
 * reference manual guarantees that the following instruction space
 * will produce an undefined instruction exception on all CPUs:
 *
 *  ARM:   xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
 *  Thumb: 1101 1110 xxxx xxxx
 */
#define BREAKINST_ARM	0xe7f001f0
#define BREAKINST_THUMB	0xde01
#endif
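
/*
 * Illustrative sketch (not part of this file): a user-space tracer could
 * plant one of the breakpoint encodings above by poking it into the
 * tracee's text, saving the original word so it can be restored later.
 * The helper name and saved_insn bookkeeping are assumptions made for
 * the example only.
 *
 *	#include <errno.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *
 *	static long saved_insn;
 *
 *	static int plant_arm_breakpoint(pid_t pid, unsigned long addr)
 *	{
 *		errno = 0;
 *		saved_insn = ptrace(PTRACE_PEEKTEXT, pid, (void *)addr, 0);
 *		if (saved_insn == -1 && errno)
 *			return -1;
 *		// 0xe7f001f0 is BREAKINST_ARM defined above
 *		return ptrace(PTRACE_POKETEXT, pid, (void *)addr,
 *			      (void *)0xe7f001f0);
 *	}
 */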

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) \
	{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0),
	REG_OFFSET_NAME(r1),
	REG_OFFSET_NAME(r2),
	REG_OFFSET_NAME(r3),
	REG_OFFSET_NAME(r4),
	REG_OFFSET_NAME(r5),
	REG_OFFSET_NAME(r6),
	REG_OFFSET_NAME(r7),
	REG_OFFSET_NAME(r8),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(fp),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(lr),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(cpsr),
	REG_OFFSET_NAME(ORIG_r0),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}
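
/*
 * Usage sketch (assumption, not taken from this file): kprobes-style code
 * can translate between register names and pt_regs offsets, e.g.:
 *
 *	int off = regs_query_register_offset("r0");
 *	if (off >= 0)
 *		pr_info("r0 lives at pt_regs offset %d (%s)\n",
 *			off, regs_query_register_name(off));
 */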

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
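
/*
 * Usage sketch (assumption): a probe handler can read the @n th word above
 * the saved stack pointer, getting 0 back if it falls off the current
 * kernel stack:
 *
 *	unsigned long arg = regs_get_kernel_stack_nth(regs, 1);
 */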

/*
 * This routine gets a word from the process's privileged stack.
 * The offset is how far from the base addr as stored in the THREAD.
 * This routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline long get_user_reg(struct task_struct *task, int offset)
{
	return task_pt_regs(task)->uregs[offset];
}

/*
 * This routine puts a word on the process's privileged stack.
 * The offset is how far from the base addr as stored in the THREAD.
 * This routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
	struct pt_regs newregs, *regs = task_pt_regs(task);
	int ret = -EINVAL;

	newregs = *regs;
	newregs.uregs[offset] = data;

	if (valid_user_regs(&newregs)) {
		regs->uregs[offset] = data;
		ret = 0;
	}

	return ret;
}

/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do. */
}

/*
 * Handle hitting a breakpoint.
 */
void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
	siginfo_t info;

	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code  = TRAP_BRKPT;
	info.si_addr  = (void __user *)instruction_pointer(regs);

	force_sig_info(SIGTRAP, &info, tsk);
}

static int break_trap(struct pt_regs *regs, unsigned int instr)
{
	ptrace_break(current, regs);
	return 0;
}

static struct undef_hook arm_break_hook = {
	.instr_mask	= 0x0fffffff,
	.instr_val	= 0x07f001f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= break_trap,
};

static struct undef_hook thumb_break_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= 0xde01,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};

static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr)
{
	unsigned int instr2;
	void __user *pc;

	/* Check the second half of the instruction. */
	pc = (void __user *)(instruction_pointer(regs) + 2);

	if (processor_mode(regs) == SVC_MODE) {
		instr2 = *(u16 *) pc;
	} else {
		get_user(instr2, (u16 __user *)pc);
	}

	if (instr2 == 0xa000) {
		ptrace_break(current, regs);
		return 0;
	} else {
		return 1;
	}
}

static struct undef_hook thumb2_break_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= 0xf7f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= thumb2_break_trap,
};

static int __init ptrace_break_init(void)
{
	register_undef_hook(&arm_break_hook);
	register_undef_hook(&thumb_break_hook);
	register_undef_hook(&thumb2_break_hook);
	return 0;
}

core_initcall(ptrace_break_init);

/*
 * Read the word at offset "off" in the "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long __user *ret)
{
	unsigned long tmp;

	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	tmp = 0;
	if (off == PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(struct pt_regs))
		tmp = get_user_reg(tsk, off >> 2);

	return put_user(tmp, ret);
}

/*
 * Write the word at offset "off" into "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long val)
{
	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	if (off >= sizeof(struct pt_regs))
		return 0;

	return put_user_reg(tsk, off >> 2, val);
}
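
/*
 * Tracer-side usage sketch (assumption, not part of this file): the
 * PEEKUSR/POKEUSR offsets above are byte offsets into struct user, so
 * register N lives at offset N * 4 and the PC at REG_PC * 4.  glibc
 * spells the request PTRACE_PEEKUSER:
 *
 *	#include <errno.h>
 *	#include <sys/ptrace.h>
 *
 *	errno = 0;
 *	long pc = ptrace(PTRACE_PEEKUSER, pid, (void *)(15 * 4), 0);
 *	if (pc == -1 && errno)
 *		perror("PTRACE_PEEKUSER");
 */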

#ifdef CONFIG_IWMMXT

/*
 * Get the child iWMMXt state.
 */
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -ENODATA;
	iwmmxt_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child iWMMXt state.
 */
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -EACCES;
	iwmmxt_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
		? -EFAULT : 0;
}

#endif

#ifdef CONFIG_CRUNCH
/*
 * Get the child Crunch state.
 */
static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child Crunch state.
 */
static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
		? -EFAULT : 0;
}
#endif
370 #ifdef CONFIG_HAVE_HW_BREAKPOINT
371 /*
372 * Convert a virtual register number into an index for a thread_info
373 * breakpoint array. Breakpoints are identified using positive numbers
374 * whilst watchpoints are negative. The registers are laid out as pairs
375 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
376 * Register 0 is reserved for describing resource information.
377 */
378 static int ptrace_hbp_num_to_idx(long num)
379 {
380 if (num < 0)
381 num = (ARM_MAX_BRP << 1) - num;
382 return (num - 1) >> 1;
383 }
384
385 /*
386 * Returns the virtual register number for the address of the
387 * breakpoint at index idx.
388 */
389 static long ptrace_hbp_idx_to_num(int idx)
390 {
391 long mid = ARM_MAX_BRP << 1;
392 long num = (idx << 1) + 1;
393 return num > mid ? mid - num : num;
394 }
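
/*
 * Worked example of the numbering above (illustrative, assuming
 * ARM_MAX_BRP == 16): breakpoint registers 1/2 are the (address, control)
 * pair for slot 0, registers 3/4 map to slot 1, and so on; watchpoint
 * registers -1/-2 map to slot 16, -3/-4 to slot 17, etc.
 *
 *	ptrace_hbp_num_to_idx(1)  == 0
 *	ptrace_hbp_num_to_idx(2)  == 0
 *	ptrace_hbp_num_to_idx(-1) == 16
 *	ptrace_hbp_idx_to_num(16) == -1
 */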

/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	long num;
	int i;
	siginfo_t info;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
		if (current->thread.debug.hbp[i] == bp)
			break;

	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);

	info.si_signo	= SIGTRAP;
	info.si_errno	= (int)num;
	info.si_code	= TRAP_HWBKPT;
	info.si_addr	= (void __user *)(bkpt->trigger);

	force_sig_info(SIGTRAP, &info, current);
}

/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
		if (t->debug.hbp[i]) {
			unregister_hw_breakpoint(t->debug.hbp[i]);
			t->debug.hbp[i] = NULL;
		}
	}
}

static u32 ptrace_get_hbp_resource_info(void)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);
	debug_arch	= arch_get_debug_arch();
	wp_len		= arch_get_max_wp_len();

	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	return reg;
}
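
/*
 * The packing above yields, from most to least significant byte: debug
 * architecture, max watchpoint length, number of watchpoints, number of
 * breakpoints.  A tracer reading virtual register 0 via PTRACE_GETHBPREGS
 * could unpack it like this (sketch; variable names are assumptions):
 *
 *	unsigned long info;
 *	ptrace(PTRACE_GETHBPREGS, pid, (void *)0, &info);
 *	unsigned int num_brps   =  info        & 0xff;
 *	unsigned int num_wrps   = (info >> 8)  & 0xff;
 *	unsigned int wp_len     = (info >> 16) & 0xff;
 *	unsigned int debug_arch = (info >> 24) & 0xff;
 */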

static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}

static int ptrace_gethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	u32 reg;
	int idx, ret = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl arch_ctrl;

	if (num == 0) {
		reg = ptrace_get_hbp_resource_info();
	} else {
		idx = ptrace_hbp_num_to_idx(num);
		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
			ret = -EINVAL;
			goto out;
		}

		bp = tsk->thread.debug.hbp[idx];
		if (!bp) {
			reg = 0;
			goto put;
		}

		arch_ctrl = counter_arch_bp(bp)->ctrl;

		/*
		 * Fix up the len because we may have adjusted it
		 * to compensate for an unaligned address.
		 */
		while (!(arch_ctrl.len & 0x1))
			arch_ctrl.len >>= 1;

		if (num & 0x1)
			reg = bp->attr.bp_addr;
		else
			reg = encode_ctrl_reg(arch_ctrl);
	}

put:
	if (put_user(reg, data))
		ret = -EFAULT;

out:
	return ret;
}

static int ptrace_sethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	int idx, gen_len, gen_type, implied_type, ret = 0;
	u32 user_val;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct perf_event_attr attr;

	if (num == 0)
		goto out;
	else if (num < 0)
		implied_type = HW_BREAKPOINT_RW;
	else
		implied_type = HW_BREAKPOINT_X;

	idx = ptrace_hbp_num_to_idx(num);
	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
		ret = -EINVAL;
		goto out;
	}

	if (get_user(user_val, data)) {
		ret = -EFAULT;
		goto out;
	}

	bp = tsk->thread.debug.hbp[idx];
	if (!bp) {
		bp = ptrace_hbp_create(tsk, implied_type);
		if (IS_ERR(bp)) {
			ret = PTR_ERR(bp);
			goto out;
		}
		tsk->thread.debug.hbp[idx] = bp;
	}

	attr = bp->attr;

	if (num & 0x1) {
		/* Address */
		attr.bp_addr = user_val;
	} else {
		/* Control */
		decode_ctrl_reg(user_val, &ctrl);
		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
		if (ret)
			goto out;

		if ((gen_type & implied_type) != gen_type) {
			ret = -EINVAL;
			goto out;
		}

		attr.bp_len	= gen_len;
		attr.bp_type	= gen_type;
		attr.disabled	= !ctrl.enabled;
	}

	ret = modify_user_hw_breakpoint(bp, &attr);
out:
	return ret;
}
#endif

/* regset get/set implementations */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   regs,
				   0, sizeof(*regs));
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs newregs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &newregs,
				 0, sizeof(newregs));
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	*task_pt_regs(target) = newregs;
	return 0;
}

static int fpa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &task_thread_info(target)->fpstate,
				   0, sizeof(struct user_fp));
}

static int fpa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct thread_info *thread = task_thread_info(target);

	thread->used_cp[1] = thread->used_cp[2] = 1;

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &thread->fpstate,
				  0, sizeof(struct user_fp));
}

#ifdef CONFIG_VFP
/*
 * VFP register get/set implementations.
 *
 * With respect to the kernel, struct user_vfp is divided into three chunks:
 *
 * 16 or 32 real VFP registers (d0-d15 or d0-d31)
 *	These are transferred to/from the real registers in the task's
 *	vfp_hard_struct. The number of registers depends on the kernel
 *	configuration.
 *
 * 16 or 0 fake VFP registers (d16-d31 or empty)
 *	i.e., the user_vfp structure has space for 32 registers even if
 *	the kernel doesn't have them all.
 *
 *	vfp_get() reads this chunk as zero where applicable
 *	vfp_set() ignores this chunk
 *
 * 1 word for the FPSCR
 *
 * The bounds-checking logic built into user_regset_copyout and friends
 * means that we can make a simple sequence of calls to map the relevant data
 * to/from the specified slice of the user regset structure.
 */
static int vfp_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	vfp_sync_hwstate(thread);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &vfp->fpregs,
				  user_fpregs_offset,
				  user_fpregs_offset + sizeof(vfp->fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       user_fpregs_offset + sizeof(vfp->fpregs),
				       user_fpscr_offset);
	if (ret)
		return ret;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &vfp->fpscr,
				   user_fpscr_offset,
				   user_fpscr_offset + sizeof(vfp->fpscr));
}

/*
 * For vfp_set() a read-modify-write is done on the VFP registers,
 * in order to avoid writing back a half-modified set of registers on
 * failure.
 */
static int vfp_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct new_vfp = thread->vfpstate.hard;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpregs,
				 user_fpregs_offset,
				 user_fpregs_offset + sizeof(new_vfp.fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					user_fpregs_offset + sizeof(new_vfp.fpregs),
					user_fpscr_offset);
	if (ret)
		return ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpscr,
				 user_fpscr_offset,
				 user_fpscr_offset + sizeof(new_vfp.fpscr));
	if (ret)
		return ret;

	vfp_sync_hwstate(thread);
	thread->vfpstate.hard = new_vfp;
	vfp_flush_hwstate(thread);

	return 0;
}
#endif /* CONFIG_VFP */

enum arm_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_VFP
	REGSET_VFP,
#endif
};

static const struct user_regset arm_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		/*
		 * For the FPA regs in fpstate, the real fields are a mixture
		 * of sizes, so pretend that the registers are word-sized:
		 */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fp) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpa_get,
		.set = fpa_set
	},
#ifdef CONFIG_VFP
	[REGSET_VFP] = {
		/*
		 * Pretend that the VFP regs are word-sized, since the FPSCR is
		 * a single word dangling at the end of struct user_vfp:
		 */
		.core_note_type = NT_ARM_VFP,
		.n = ARM_VFPREGS_SIZE / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = vfp_get,
		.set = vfp_set
	},
#endif /* CONFIG_VFP */
};

static const struct user_regset_view user_arm_view = {
	.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_arm_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = ptrace_write_user(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_GPR,
					  0, sizeof(struct pt_regs),
					  datap);
		break;

	case PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_GPR,
					    0, sizeof(struct pt_regs),
					    datap);
		break;

	case PTRACE_GETFPREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_FPR,
					  0, sizeof(union fp_state),
					  datap);
		break;

	case PTRACE_SETFPREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_FPR,
					    0, sizeof(union fp_state),
					    datap);
		break;

#ifdef CONFIG_IWMMXT
	case PTRACE_GETWMMXREGS:
		ret = ptrace_getwmmxregs(child, datap);
		break;

	case PTRACE_SETWMMXREGS:
		ret = ptrace_setwmmxregs(child, datap);
		break;
#endif

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value,
			       datap);
		break;

	case PTRACE_SET_SYSCALL:
		task_thread_info(child)->syscall = data;
		ret = 0;
		break;

#ifdef CONFIG_CRUNCH
	case PTRACE_GETCRUNCHREGS:
		ret = ptrace_getcrunchregs(child, datap);
		break;

	case PTRACE_SETCRUNCHREGS:
		ret = ptrace_setcrunchregs(child, datap);
		break;
#endif

#ifdef CONFIG_VFP
	case PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_VFP,
					  0, ARM_VFPREGS_SIZE,
					  datap);
		break;

	case PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_VFP,
					    0, ARM_VFPREGS_SIZE,
					    datap);
		break;
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case PTRACE_GETHBPREGS:
		if (ptrace_get_breakpoints(child) < 0)
			return -ESRCH;

		ret = ptrace_gethbpregs(child, addr,
					(unsigned long __user *)data);
		ptrace_put_breakpoints(child);
		break;
	case PTRACE_SETHBPREGS:
		if (ptrace_get_breakpoints(child) < 0)
			return -ESRCH;

		ret = ptrace_sethbpregs(child, addr,
					(unsigned long __user *)data);
		ptrace_put_breakpoints(child);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
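
/*
 * Tracer-side usage sketch (assumption, not part of this file): reading
 * the full GP register set of a stopped tracee via PTRACE_GETREGS, which
 * goes through REGSET_GPR above.  The example assumes ARM glibc's
 * struct user_regs from <sys/user.h>, whose uregs[] mirrors pt_regs:
 *
 *	#include <stdio.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/user.h>
 *
 *	struct user_regs regs;
 *	if (ptrace(PTRACE_GETREGS, pid, NULL, &regs) == 0)
 *		printf("pc=%08lx cpsr=%08lx\n",
 *		       regs.uregs[15], regs.uregs[16]);
 */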

asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{
	unsigned long ip;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return scno;
	if (!(current->ptrace & PT_PTRACED))
		return scno;

	/*
	 * Save IP.  IP is used to denote syscall entry/exit:
	 * IP = 0 -> entry, = 1 -> exit
	 */
	ip = regs->ARM_ip;
	regs->ARM_ip = why;

	current_thread_info()->syscall = scno;

	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));
	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
	regs->ARM_ip = ip;

	return current_thread_info()->syscall;
}
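
/*
 * Tracer-side usage sketch (assumption): with PTRACE_O_TRACESYSGOOD set,
 * the SIGTRAP | 0x80 reported above lets a tracer tell syscall stops
 * apart from ordinary SIGTRAPs:
 *
 *	#include <signal.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0, (void *)PTRACE_O_TRACESYSGOOD);
 *	for (;;) {
 *		int status;
 *		ptrace(PTRACE_SYSCALL, pid, 0, 0);
 *		waitpid(pid, &status, 0);
 *		if (WIFEXITED(status))
 *			break;
 *		if (WIFSTOPPED(status) &&
 *		    WSTOPSIG(status) == (SIGTRAP | 0x80))
 *			;	// stopped at syscall entry or exit
 *	}
 */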