/*
 * Merge branch 'kconfig-for-40' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek...
 * [deliverable/linux.git] / arch / arm / kernel / ptrace.c
 */
1 /*
2 * linux/arch/arm/kernel/ptrace.c
3 *
4 * By Ross Biro 1/23/92
5 * edited by Linus Torvalds
6 * ARM modifications Copyright (C) 2000 Russell King
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>
29
30 #define REG_PC 15
31 #define REG_PSR 16
32 /*
33 * does not yet catch signals sent when the child dies.
34 * in exit.c or in signal.c.
35 */
36
37 #if 0
38 /*
39 * Breakpoint SWI instruction: SWI &9F0001
40 */
41 #define BREAKINST_ARM 0xef9f0001
42 #define BREAKINST_THUMB 0xdf00 /* fill this in later */
43 #else
44 /*
45 * New breakpoints - use an undefined instruction. The ARM architecture
46 * reference manual guarantees that the following instruction space
47 * will produce an undefined instruction exception on all CPUs:
48 *
49 * ARM: xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
50 * Thumb: 1101 1110 xxxx xxxx
51 */
52 #define BREAKINST_ARM 0xe7f001f0
53 #define BREAKINST_THUMB 0xde01
54 #endif
55
/*
 * Maps a user-visible register name onto its byte offset within
 * struct pt_regs, for the regs_query_*() helpers below.
 */
struct pt_regs_offset {
	const char *name;	/* register name, e.g. "r0", "pc" */
	int offset;		/* offsetof(struct pt_regs, ARM_<name>) */
};

#define REG_OFFSET_NAME(r) \
	{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

/* Name/offset pair for every user-visible register; NULL-name terminated. */
static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0),
	REG_OFFSET_NAME(r1),
	REG_OFFSET_NAME(r2),
	REG_OFFSET_NAME(r3),
	REG_OFFSET_NAME(r4),
	REG_OFFSET_NAME(r5),
	REG_OFFSET_NAME(r6),
	REG_OFFSET_NAME(r7),
	REG_OFFSET_NAME(r8),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(fp),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(lr),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(cpsr),
	REG_OFFSET_NAME(ORIG_r0),
	REG_OFFSET_END,
};
86
87 /**
88 * regs_query_register_offset() - query register offset from its name
89 * @name: the name of a register
90 *
91 * regs_query_register_offset() returns the offset of a register in struct
92 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
93 */
94 int regs_query_register_offset(const char *name)
95 {
96 const struct pt_regs_offset *roff;
97 for (roff = regoffset_table; roff->name != NULL; roff++)
98 if (!strcmp(roff->name, name))
99 return roff->offset;
100 return -EINVAL;
101 }
102
103 /**
104 * regs_query_register_name() - query register name from its offset
105 * @offset: the offset of a register in struct pt_regs.
106 *
107 * regs_query_register_name() returns the name of a register from its
108 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
109 */
110 const char *regs_query_register_name(unsigned int offset)
111 {
112 const struct pt_regs_offset *roff;
113 for (roff = regoffset_table; roff->name != NULL; roff++)
114 if (roff->offset == offset)
115 return roff->name;
116 return NULL;
117 }
118
119 /**
120 * regs_within_kernel_stack() - check the address in the stack
121 * @regs: pt_regs which contains kernel stack pointer.
122 * @addr: address which is checked.
123 *
124 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
125 * If @addr is within the kernel stack, it returns true. If not, returns false.
126 */
127 bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
128 {
129 return ((addr & ~(THREAD_SIZE - 1)) ==
130 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
131 }
132
133 /**
134 * regs_get_kernel_stack_nth() - get Nth entry of the stack
135 * @regs: pt_regs which contains kernel stack pointer.
136 * @n: stack entry number.
137 *
138 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
139 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
140 * this returns 0.
141 */
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * Returns the @n th word above the kernel stack pointer in @regs, or 0
 * if that slot falls outside the kernel stack region.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *slot;

	slot = (unsigned long *)kernel_stack_pointer(regs) + n;
	if (!regs_within_kernel_stack(regs, (unsigned long)slot))
		return 0;
	return *slot;
}
151
152 /*
153 * this routine will get a word off of the processes privileged stack.
154 * the offset is how far from the base addr as stored in the THREAD.
155 * this routine assumes that all the privileged stacks are in our
156 * data space.
157 */
158 static inline long get_user_reg(struct task_struct *task, int offset)
159 {
160 return task_pt_regs(task)->uregs[offset];
161 }
162
163 /*
164 * this routine will put a word on the processes privileged stack.
165 * the offset is how far from the base addr as stored in the THREAD.
166 * this routine assumes that all the privileged stacks are in our
167 * data space.
168 */
169 static inline int
170 put_user_reg(struct task_struct *task, int offset, long data)
171 {
172 struct pt_regs newregs, *regs = task_pt_regs(task);
173 int ret = -EINVAL;
174
175 newregs = *regs;
176 newregs.uregs[offset] = data;
177
178 if (valid_user_regs(&newregs)) {
179 regs->uregs[offset] = data;
180 ret = 0;
181 }
182
183 return ret;
184 }
185
186 /*
187 * Called by kernel/ptrace.c when detaching..
188 */
/*
 * Called by kernel/ptrace.c when detaching.
 * No per-task debug state needs tearing down here.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do. */
}
193
194 /*
195 * Handle hitting a breakpoint.
196 */
197 void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
198 {
199 siginfo_t info;
200
201 info.si_signo = SIGTRAP;
202 info.si_errno = 0;
203 info.si_code = TRAP_BRKPT;
204 info.si_addr = (void __user *)instruction_pointer(regs);
205
206 force_sig_info(SIGTRAP, &info, tsk);
207 }
208
/* Undef-instruction hook handler: report the breakpoint to the tracer. */
static int break_trap(struct pt_regs *regs, unsigned int instr)
{
	ptrace_break(current, regs);
	return 0;	/* 0 => instruction handled */
}
214
/* Match the ARM-state breakpoint encoding (BREAKINST_ARM, any condition). */
static struct undef_hook arm_break_hook = {
	.instr_mask	= 0x0fffffff,
	.instr_val	= 0x07f001f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,		/* ARM state only */
	.fn		= break_trap,
};

/* Match the Thumb-state breakpoint encoding (BREAKINST_THUMB). */
static struct undef_hook thumb_break_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= 0xde01,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,	/* Thumb state only */
	.fn		= break_trap,
};
230
231 static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr)
232 {
233 unsigned int instr2;
234 void __user *pc;
235
236 /* Check the second half of the instruction. */
237 pc = (void __user *)(instruction_pointer(regs) + 2);
238
239 if (processor_mode(regs) == SVC_MODE) {
240 instr2 = *(u16 *) pc;
241 } else {
242 get_user(instr2, (u16 __user *)pc);
243 }
244
245 if (instr2 == 0xa000) {
246 ptrace_break(current, regs);
247 return 0;
248 } else {
249 return 1;
250 }
251 }
252
/*
 * Match the first halfword (0xf7f0) of the 32-bit Thumb-2 breakpoint;
 * thumb2_break_trap() checks the second halfword.
 */
static struct undef_hook thumb2_break_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= 0xf7f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,	/* Thumb state only */
	.fn		= thumb2_break_trap,
};
260
/*
 * Install the undefined-instruction hooks that turn the ARM, Thumb and
 * Thumb-2 breakpoint encodings into ptrace SIGTRAPs.
 */
static int __init ptrace_break_init(void)
{
	register_undef_hook(&arm_break_hook);
	register_undef_hook(&thumb_break_hook);
	register_undef_hook(&thumb2_break_hook);
	return 0;
}

core_initcall(ptrace_break_init);
270
271 /*
272 * Read the word at offset "off" into the "struct user". We
273 * actually access the pt_regs stored on the kernel stack.
274 */
275 static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
276 unsigned long __user *ret)
277 {
278 unsigned long tmp;
279
280 if (off & 3 || off >= sizeof(struct user))
281 return -EIO;
282
283 tmp = 0;
284 if (off == PT_TEXT_ADDR)
285 tmp = tsk->mm->start_code;
286 else if (off == PT_DATA_ADDR)
287 tmp = tsk->mm->start_data;
288 else if (off == PT_TEXT_END_ADDR)
289 tmp = tsk->mm->end_code;
290 else if (off < sizeof(struct pt_regs))
291 tmp = get_user_reg(tsk, off >> 2);
292
293 return put_user(tmp, ret);
294 }
295
296 /*
297 * Write the word at offset "off" into "struct user". We
298 * actually access the pt_regs stored on the kernel stack.
299 */
300 static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
301 unsigned long val)
302 {
303 if (off & 3 || off >= sizeof(struct user))
304 return -EIO;
305
306 if (off >= sizeof(struct pt_regs))
307 return 0;
308
309 return put_user_reg(tsk, off >> 2, val);
310 }
311
312 #ifdef CONFIG_IWMMXT
313
314 /*
315 * Get the child iWMMXt state.
316 */
317 static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
318 {
319 struct thread_info *thread = task_thread_info(tsk);
320
321 if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
322 return -ENODATA;
323 iwmmxt_task_disable(thread); /* force it to ram */
324 return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
325 ? -EFAULT : 0;
326 }
327
328 /*
329 * Set the child iWMMXt state.
330 */
331 static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
332 {
333 struct thread_info *thread = task_thread_info(tsk);
334
335 if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
336 return -EACCES;
337 iwmmxt_task_release(thread); /* force a reload */
338 return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
339 ? -EFAULT : 0;
340 }
341
342 #endif
343
344 #ifdef CONFIG_CRUNCH
345 /*
346 * Get the child Crunch state.
347 */
348 static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
349 {
350 struct thread_info *thread = task_thread_info(tsk);
351
352 crunch_task_disable(thread); /* force it to ram */
353 return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
354 ? -EFAULT : 0;
355 }
356
357 /*
358 * Set the child Crunch state.
359 */
360 static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
361 {
362 struct thread_info *thread = task_thread_info(tsk);
363
364 crunch_task_release(thread); /* force a reload */
365 return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
366 ? -EFAULT : 0;
367 }
368 #endif
369
370 #ifdef CONFIG_HAVE_HW_BREAKPOINT
371 /*
372 * Convert a virtual register number into an index for a thread_info
373 * breakpoint array. Breakpoints are identified using positive numbers
374 * whilst watchpoints are negative. The registers are laid out as pairs
375 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
376 * Register 0 is reserved for describing resource information.
377 */
378 static int ptrace_hbp_num_to_idx(long num)
379 {
380 if (num < 0)
381 num = (ARM_MAX_BRP << 1) - num;
382 return (num - 1) >> 1;
383 }
384
385 /*
386 * Returns the virtual register number for the address of the
387 * breakpoint at index idx.
388 */
389 static long ptrace_hbp_idx_to_num(int idx)
390 {
391 long mid = ARM_MAX_BRP << 1;
392 long num = (idx << 1) + 1;
393 return num > mid ? mid - num : num;
394 }
395
396 /*
397 * Handle hitting a HW-breakpoint.
398 */
399 static void ptrace_hbptriggered(struct perf_event *bp, int unused,
400 struct perf_sample_data *data,
401 struct pt_regs *regs)
402 {
403 struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
404 long num;
405 int i;
406 siginfo_t info;
407
408 for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
409 if (current->thread.debug.hbp[i] == bp)
410 break;
411
412 num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);
413
414 info.si_signo = SIGTRAP;
415 info.si_errno = (int)num;
416 info.si_code = TRAP_HWBKPT;
417 info.si_addr = (void __user *)(bkpt->trigger);
418
419 force_sig_info(SIGTRAP, &info, current);
420 }
421
422 /*
423 * Set ptrace breakpoint pointers to zero for this task.
424 * This is required in order to prevent child processes from unregistering
425 * breakpoints held by their parent.
426 */
/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}
431
432 /*
433 * Unregister breakpoints from this task and reset the pointers in
434 * the thread_struct.
435 */
436 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
437 {
438 int i;
439 struct thread_struct *t = &tsk->thread;
440
441 for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
442 if (t->debug.hbp[i]) {
443 unregister_hw_breakpoint(t->debug.hbp[i]);
444 t->debug.hbp[i] = NULL;
445 }
446 }
447 }
448
449 static u32 ptrace_get_hbp_resource_info(void)
450 {
451 u8 num_brps, num_wrps, debug_arch, wp_len;
452 u32 reg = 0;
453
454 num_brps = hw_breakpoint_slots(TYPE_INST);
455 num_wrps = hw_breakpoint_slots(TYPE_DATA);
456 debug_arch = arch_get_debug_arch();
457 wp_len = arch_get_max_wp_len();
458
459 reg |= debug_arch;
460 reg <<= 8;
461 reg |= wp_len;
462 reg <<= 8;
463 reg |= num_wrps;
464 reg <<= 8;
465 reg |= num_brps;
466
467 return reg;
468 }
469
470 static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
471 {
472 struct perf_event_attr attr;
473
474 ptrace_breakpoint_init(&attr);
475
476 /* Initialise fields to sane defaults. */
477 attr.bp_addr = 0;
478 attr.bp_len = HW_BREAKPOINT_LEN_4;
479 attr.bp_type = type;
480 attr.disabled = 1;
481
482 return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk);
483 }
484
/*
 * Read one virtual breakpoint register for PTRACE_GETHBPREGS.
 * num == 0 returns the packed resource info; for other numbers, odd
 * values return the breakpoint address and even values the encoded
 * control register.  The result is written to user pointer @data.
 */
static int ptrace_gethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	u32 reg;
	int idx, ret = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl arch_ctrl;

	if (num == 0) {
		reg = ptrace_get_hbp_resource_info();
	} else {
		idx = ptrace_hbp_num_to_idx(num);
		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
			ret = -EINVAL;
			goto out;
		}

		bp = tsk->thread.debug.hbp[idx];
		if (!bp) {
			/* Slot never programmed: read back as zero. */
			reg = 0;
			goto put;
		}

		arch_ctrl = counter_arch_bp(bp)->ctrl;

		/*
		 * Fix up the len because we may have adjusted it
		 * to compensate for an unaligned address.
		 */
		while (!(arch_ctrl.len & 0x1))
			arch_ctrl.len >>= 1;

		if (num & 0x1)
			reg = bp->attr.bp_addr;
		else
			reg = encode_ctrl_reg(arch_ctrl);
	}

put:
	if (put_user(reg, data))
		ret = -EFAULT;

out:
	return ret;
}
530
/*
 * Write one virtual breakpoint register for PTRACE_SETHBPREGS.
 * num > 0 addresses breakpoints (execute), num < 0 watchpoints (r/w);
 * odd numbers set the address, even numbers the control word.  The
 * underlying perf event is created lazily on first use of a slot.
 */
static int ptrace_sethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	int idx, gen_len, gen_type, implied_type, ret = 0;
	u32 user_val;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct perf_event_attr attr;

	if (num == 0)
		goto out;	/* register 0 (resource info) is read-only */
	else if (num < 0)
		implied_type = HW_BREAKPOINT_RW;
	else
		implied_type = HW_BREAKPOINT_X;

	idx = ptrace_hbp_num_to_idx(num);
	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
		ret = -EINVAL;
		goto out;
	}

	if (get_user(user_val, data)) {
		ret = -EFAULT;
		goto out;
	}

	bp = tsk->thread.debug.hbp[idx];
	if (!bp) {
		/* First touch of this slot: create a disabled event. */
		bp = ptrace_hbp_create(tsk, implied_type);
		if (IS_ERR(bp)) {
			ret = PTR_ERR(bp);
			goto out;
		}
		tsk->thread.debug.hbp[idx] = bp;
	}

	attr = bp->attr;

	if (num & 0x1) {
		/* Address */
		attr.bp_addr = user_val;
	} else {
		/* Control */
		decode_ctrl_reg(user_val, &ctrl);
		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
		if (ret)
			goto out;

		/* The requested type must be a subset of the slot's type. */
		if ((gen_type & implied_type) != gen_type) {
			ret = -EINVAL;
			goto out;
		}

		attr.bp_len = gen_len;
		attr.bp_type = gen_type;
		attr.disabled = !ctrl.enabled;
	}

	ret = modify_user_hw_breakpoint(bp, &attr);
out:
	return ret;
}
594 #endif
595
596 /* regset get/set implementations */
597
598 static int gpr_get(struct task_struct *target,
599 const struct user_regset *regset,
600 unsigned int pos, unsigned int count,
601 void *kbuf, void __user *ubuf)
602 {
603 struct pt_regs *regs = task_pt_regs(target);
604
605 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
606 regs,
607 0, sizeof(*regs));
608 }
609
610 static int gpr_set(struct task_struct *target,
611 const struct user_regset *regset,
612 unsigned int pos, unsigned int count,
613 const void *kbuf, const void __user *ubuf)
614 {
615 int ret;
616 struct pt_regs newregs;
617
618 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
619 &newregs,
620 0, sizeof(newregs));
621 if (ret)
622 return ret;
623
624 if (!valid_user_regs(&newregs))
625 return -EINVAL;
626
627 *task_pt_regs(target) = newregs;
628 return 0;
629 }
630
631 static int fpa_get(struct task_struct *target,
632 const struct user_regset *regset,
633 unsigned int pos, unsigned int count,
634 void *kbuf, void __user *ubuf)
635 {
636 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
637 &task_thread_info(target)->fpstate,
638 0, sizeof(struct user_fp));
639 }
640
641 static int fpa_set(struct task_struct *target,
642 const struct user_regset *regset,
643 unsigned int pos, unsigned int count,
644 const void *kbuf, const void __user *ubuf)
645 {
646 struct thread_info *thread = task_thread_info(target);
647
648 thread->used_cp[1] = thread->used_cp[2] = 1;
649
650 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
651 &thread->fpstate,
652 0, sizeof(struct user_fp));
653 }
654
655 #ifdef CONFIG_VFP
656 /*
657 * VFP register get/set implementations.
658 *
659 * With respect to the kernel, struct user_fp is divided into three chunks:
660 * 16 or 32 real VFP registers (d0-d15 or d0-31)
661 * These are transferred to/from the real registers in the task's
662 * vfp_hard_struct. The number of registers depends on the kernel
663 * configuration.
664 *
665 * 16 or 0 fake VFP registers (d16-d31 or empty)
666 * i.e., the user_vfp structure has space for 32 registers even if
667 * the kernel doesn't have them all.
668 *
669 * vfp_get() reads this chunk as zero where applicable
670 * vfp_set() ignores this chunk
671 *
672 * 1 word for the FPSCR
673 *
674 * The bounds-checking logic built into user_regset_copyout and friends
675 * means that we can make a simple sequence of calls to map the relevant data
676 * to/from the specified slice of the user regset structure.
677 */
/*
 * Regset get for VFP state: see the layout description above.  The
 * three copyout calls must stay in this order - they consume the
 * (pos, count) window sequentially.
 */
static int vfp_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	/* Flush live hardware state into vfpstate before reading it. */
	vfp_sync_hwstate(thread);

	/* Chunk 1: the real d-registers held by the kernel. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &vfp->fpregs,
				  user_fpregs_offset,
				  user_fpregs_offset + sizeof(vfp->fpregs));
	if (ret)
		return ret;

	/* Chunk 2: fake registers the kernel doesn't have - read as zero. */
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       user_fpregs_offset + sizeof(vfp->fpregs),
				       user_fpscr_offset);
	if (ret)
		return ret;

	/* Chunk 3: the FPSCR word at the end of struct user_vfp. */
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &vfp->fpscr,
				   user_fpscr_offset,
				   user_fpscr_offset + sizeof(vfp->fpscr));
}
709
710 /*
711 * For vfp_set() a read-modify-write is done on the VFP registers,
712 * in order to avoid writing back a half-modified set of registers on
713 * failure.
714 */
/*
 * Regset set for VFP state.  All three copyin calls target a local
 * copy (new_vfp) so a fault partway through never leaves the task
 * with a half-modified register set; only on full success is the
 * copy committed and the hardware state flushed.
 */
static int vfp_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct new_vfp = thread->vfpstate.hard;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	/* Chunk 1: the real d-registers. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpregs,
				 user_fpregs_offset,
				 user_fpregs_offset + sizeof(new_vfp.fpregs));
	if (ret)
		return ret;

	/* Chunk 2: fake registers - input is skipped, not stored. */
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					user_fpregs_offset + sizeof(new_vfp.fpregs),
					user_fpscr_offset);
	if (ret)
		return ret;

	/* Chunk 3: the FPSCR word. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpscr,
				 user_fpscr_offset,
				 user_fpscr_offset + sizeof(new_vfp.fpscr));
	if (ret)
		return ret;

	/* Commit: sync, replace the saved state, invalidate hw copy. */
	vfp_sync_hwstate(thread);
	thread->vfpstate.hard = new_vfp;
	vfp_flush_hwstate(thread);

	return 0;
}
752 #endif /* CONFIG_VFP */
753
/* Indices into arm_regsets[] below. */
enum arm_regset {
	REGSET_GPR,	/* general-purpose registers (pt_regs) */
	REGSET_FPR,	/* legacy FPA floating-point state */
#ifdef CONFIG_VFP
	REGSET_VFP,	/* VFP registers + FPSCR */
#endif
};
761
/* The regsets exported for ARM tasks, indexed by enum arm_regset. */
static const struct user_regset arm_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		/*
		 * For the FPA regs in fpstate, the real fields are a mixture
		 * of sizes, so pretend that the registers are word-sized:
		 */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fp) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpa_get,
		.set = fpa_set
	},
#ifdef CONFIG_VFP
	[REGSET_VFP] = {
		/*
		 * Pretend that the VFP regs are word-sized, since the FPSCR is
		 * a single word dangling at the end of struct user_vfp:
		 */
		.core_note_type = NT_ARM_VFP,
		.n = ARM_VFPREGS_SIZE / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = vfp_get,
		.set = vfp_set
	},
#endif /* CONFIG_VFP */
};
798
/* The single regset view describing all ARM tasks. */
static const struct user_regset_view user_arm_view = {
	.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};
803
/* Every ARM task shares the same regset view. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_arm_view;
}
808
/*
 * ARM-specific ptrace request dispatcher.  @addr and @data are
 * request-dependent (register offset, virtual hbp register number,
 * or a user pointer); anything unhandled falls through to the
 * generic ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = ptrace_write_user(child, addr, data);
		break;

	/* Whole-register-file transfers go through the regset layer. */
	case PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_GPR,
					  0, sizeof(struct pt_regs),
					  datap);
		break;

	case PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_GPR,
					    0, sizeof(struct pt_regs),
					    datap);
		break;

	case PTRACE_GETFPREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_FPR,
					  0, sizeof(union fp_state),
					  datap);
		break;

	case PTRACE_SETFPREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_FPR,
					    0, sizeof(union fp_state),
					    datap);
		break;

#ifdef CONFIG_IWMMXT
	case PTRACE_GETWMMXREGS:
		ret = ptrace_getwmmxregs(child, datap);
		break;

	case PTRACE_SETWMMXREGS:
		ret = ptrace_setwmmxregs(child, datap);
		break;
#endif

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value,
			       datap);
		break;

	case PTRACE_SET_SYSCALL:
		/* Let the tracer rewrite the syscall number. */
		task_thread_info(child)->syscall = data;
		ret = 0;
		break;

#ifdef CONFIG_CRUNCH
	case PTRACE_GETCRUNCHREGS:
		ret = ptrace_getcrunchregs(child, datap);
		break;

	case PTRACE_SETCRUNCHREGS:
		ret = ptrace_setcrunchregs(child, datap);
		break;
#endif

#ifdef CONFIG_VFP
	case PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_VFP,
					  0, ARM_VFPREGS_SIZE,
					  datap);
		break;

	case PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_VFP,
					    0, ARM_VFPREGS_SIZE,
					    datap);
		break;
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/*
	 * For the hbp requests, @addr is the virtual register number;
	 * the get/put_breakpoints calls pin the child's breakpoint
	 * state for the duration of the access.
	 */
	case PTRACE_GETHBPREGS:
		if (ptrace_get_breakpoints(child) < 0)
			return -ESRCH;

		ret = ptrace_gethbpregs(child, addr,
					(unsigned long __user *)data);
		ptrace_put_breakpoints(child);
		break;
	case PTRACE_SETHBPREGS:
		if (ptrace_get_breakpoints(child) < 0)
			return -ESRCH;

		ret = ptrace_sethbpregs(child, addr,
					(unsigned long __user *)data);
		ptrace_put_breakpoints(child);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
924
/*
 * Called on traced syscall entry/exit (see the IP convention below for
 * which is which).  Returns the syscall number to execute, which the
 * tracer may have modified via PTRACE_SET_SYSCALL.
 */
asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{
	unsigned long ip;

	/* Fast path: nothing to report if nobody is tracing syscalls. */
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return scno;
	if (!(current->ptrace & PT_PTRACED))
		return scno;

	/*
	 * Save IP. IP is used to denote syscall entry/exit:
	 * IP = 0 -> entry, = 1 -> exit
	 */
	ip = regs->ARM_ip;
	regs->ARM_ip = why;

	current_thread_info()->syscall = scno;

	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));
	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use. strace only continues with a signal if the
	 * stopping signal is not SIGTRAP. -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
	/* Restore the real IP now that the stop has been reported. */
	regs->ARM_ip = ip;

	return current_thread_info()->syscall;
}
/* This page took 0.05136 seconds and 6 git commands to generate. */