x86: x86 ia32 ptrace getreg/putreg merge
arch/x86/kernel/ptrace.c
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>

/*
 * Does not yet catch signals sent when the child dies
 * in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32        ((unsigned long)                \
                             (X86_EFLAGS_CF | X86_EFLAGS_PF |   \
                              X86_EFLAGS_AF | X86_EFLAGS_ZF |   \
                              X86_EFLAGS_SF | X86_EFLAGS_TF |   \
                              X86_EFLAGS_DF | X86_EFLAGS_OF |   \
                              X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
        return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK        FLAG_MASK_32

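/*
 * regno comes in as a byte offset into struct user_regs_struct.
 * Convert it to a word index; entries past FS are shifted down by one
 * because %gs has no slot in pt_regs here -- it lives in thread.gs and
 * is handled separately in get_segment_reg()/set_segment_reg() below.
 */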
static long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
        BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
        regno >>= 2;
        if (regno > FS)
                --regno;
        return &regs->bx + regno;
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
        /*
         * Returning the value truncates it to 16 bits.
         */
        unsigned int retval;
        if (offset != offsetof(struct user_regs_struct, gs))
                retval = *pt_regs_access(task_pt_regs(task), offset);
        else {
                retval = task->thread.gs;
                if (task == current)
                        savesegment(gs, retval);
        }
        return retval;
}

static int set_segment_reg(struct task_struct *task,
                           unsigned long offset, u16 value)
{
        /*
         * The value argument was already truncated to 16 bits.
         */
        if (invalid_selector(value))
                return -EIO;

        if (offset != offsetof(struct user_regs_struct, gs))
                *pt_regs_access(task_pt_regs(task), offset) = value;
        else {
                task->thread.gs = value;
                if (task == current)
                        /*
                         * The user-mode %gs is not affected by
                         * kernel entry, so we must update the CPU.
                         */
                        loadsegment(gs, value);
        }

        return 0;
}

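/*
 * A debug-register address is rejected unless the whole watched range
 * fits below the user address-space limit; the largest watchpoint is
 * 4 bytes here, hence the -3 slack (-7 on 64-bit, see below).
 */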
static unsigned long debugreg_addr_limit(struct task_struct *task)
{
        return TASK_SIZE - 3;
}

#else  /* CONFIG_X86_64 */

#define FLAG_MASK        (FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
        BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
        return &regs->r15 + (offset / sizeof(regs->r15));
}

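/*
 * On 64-bit, kernel entry does not save %ds/%es/%fs/%gs in pt_regs, so
 * the selectors are read straight from the CPU for the current task or
 * from the saved thread state for a stopped tracee; only %cs and %ss
 * fall through to pt_regs below.
 */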
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
        /*
         * Returning the value truncates it to 16 bits.
         */
        unsigned int seg;

        switch (offset) {
        case offsetof(struct user_regs_struct, fs):
                if (task == current) {
                        /* Older gas can't assemble movq %?s,%r?? */
                        asm("movl %%fs,%0" : "=r" (seg));
                        return seg;
                }
                return task->thread.fsindex;
        case offsetof(struct user_regs_struct, gs):
                if (task == current) {
                        asm("movl %%gs,%0" : "=r" (seg));
                        return seg;
                }
                return task->thread.gsindex;
        case offsetof(struct user_regs_struct, ds):
                if (task == current) {
                        asm("movl %%ds,%0" : "=r" (seg));
                        return seg;
                }
                return task->thread.ds;
        case offsetof(struct user_regs_struct, es):
                if (task == current) {
                        asm("movl %%es,%0" : "=r" (seg));
                        return seg;
                }
                return task->thread.es;

        case offsetof(struct user_regs_struct, cs):
        case offsetof(struct user_regs_struct, ss):
                break;
        }
        return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
                           unsigned long offset, u16 value)
{
        /*
         * The value argument was already truncated to 16 bits.
         */
        if (invalid_selector(value))
                return -EIO;

        switch (offset) {
        case offsetof(struct user_regs_struct, fs):
                /*
                 * If this is setting fs as for normal 64-bit use but
                 * setting fs_base has implicitly changed it, leave it.
                 */
                if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
                     task->thread.fs != 0) ||
                    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
                     task->thread.fs == 0))
                        break;
                task->thread.fsindex = value;
                if (task == current)
                        loadsegment(fs, task->thread.fsindex);
                break;
        case offsetof(struct user_regs_struct, gs):
                /*
                 * If this is setting gs as for normal 64-bit use but
                 * setting gs_base has implicitly changed it, leave it.
                 */
                if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
                     task->thread.gs != 0) ||
                    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
                     task->thread.gs == 0))
                        break;
                task->thread.gsindex = value;
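                /*
                 * Note: load_gs_index() is used below instead of a plain
                 * segment load; presumably this keeps the kernel's own GS
                 * base (per-cpu data) intact while the user selector is
                 * updated.
                 */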
                if (task == current)
                        load_gs_index(task->thread.gsindex);
                break;
        case offsetof(struct user_regs_struct, ds):
                task->thread.ds = value;
                if (task == current)
                        loadsegment(ds, task->thread.ds);
                break;
        case offsetof(struct user_regs_struct, es):
                task->thread.es = value;
                if (task == current)
                        loadsegment(es, task->thread.es);
                break;

                /*
                 * Can't actually change these in 64-bit mode.
                 */
        case offsetof(struct user_regs_struct, cs):
#ifdef CONFIG_IA32_EMULATION
                if (test_tsk_thread_flag(task, TIF_IA32))
                        task_pt_regs(task)->cs = value;
#endif
                break;
        case offsetof(struct user_regs_struct, ss):
#ifdef CONFIG_IA32_EMULATION
                if (test_tsk_thread_flag(task, TIF_IA32))
                        task_pt_regs(task)->ss = value;
#endif
                break;
        }

        return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(task, TIF_IA32))
                return IA32_PAGE_OFFSET - 3;
#endif
        return TASK_SIZE64 - 7;
}

#endif /* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
        unsigned long retval = task_pt_regs(task)->flags;

        /*
         * If the debugger set TF, hide it from the readout.
         */
        if (test_tsk_thread_flag(task, TIF_FORCED_TF))
                retval &= ~X86_EFLAGS_TF;

        return retval;
}

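/*
 * Only the bits in FLAG_MASK can be changed by the tracer; everything
 * else in the saved flags word (IOPL, IF, VM, ...) keeps the task's
 * current value.
 */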
static int set_flags(struct task_struct *task, unsigned long value)
{
        struct pt_regs *regs = task_pt_regs(task);

        /*
         * If the user value contains TF, mark that
         * it was not "us" (the debugger) that set it.
         * If not, make sure it stays set if we had.
         */
        if (value & X86_EFLAGS_TF)
                clear_tsk_thread_flag(task, TIF_FORCED_TF);
        else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
                value |= X86_EFLAGS_TF;

        regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

        return 0;
}

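/*
 * putreg()/getreg() take a byte offset into struct user_regs_struct,
 * exactly as a debugger supplies it via PTRACE_POKEUSR/PTRACE_PEEKUSR
 * or as generated by the PTRACE_SETREGS/GETREGS loops in arch_ptrace().
 */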
static int putreg(struct task_struct *child,
                  unsigned long offset, unsigned long value)
{
        switch (offset) {
        case offsetof(struct user_regs_struct, cs):
        case offsetof(struct user_regs_struct, ds):
        case offsetof(struct user_regs_struct, es):
        case offsetof(struct user_regs_struct, fs):
        case offsetof(struct user_regs_struct, gs):
        case offsetof(struct user_regs_struct, ss):
                return set_segment_reg(child, offset, value);

        case offsetof(struct user_regs_struct, flags):
                return set_flags(child, value);

#ifdef CONFIG_X86_64
        case offsetof(struct user_regs_struct, fs_base):
                if (value >= TASK_SIZE_OF(child))
                        return -EIO;
                /*
                 * When changing the segment base, use do_arch_prctl
                 * to set either thread.fs or thread.fsindex and the
                 * corresponding GDT slot.
                 */
                if (child->thread.fs != value)
                        return do_arch_prctl(child, ARCH_SET_FS, value);
                return 0;
        case offsetof(struct user_regs_struct, gs_base):
                /*
                 * Exactly the same here as the %fs handling above.
                 */
                if (value >= TASK_SIZE_OF(child))
                        return -EIO;
                if (child->thread.gs != value)
                        return do_arch_prctl(child, ARCH_SET_GS, value);
                return 0;
#endif
        }

        *pt_regs_access(task_pt_regs(child), offset) = value;
        return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
        switch (offset) {
        case offsetof(struct user_regs_struct, cs):
        case offsetof(struct user_regs_struct, ds):
        case offsetof(struct user_regs_struct, es):
        case offsetof(struct user_regs_struct, fs):
        case offsetof(struct user_regs_struct, gs):
        case offsetof(struct user_regs_struct, ss):
                return get_segment_reg(task, offset);

        case offsetof(struct user_regs_struct, flags):
                return get_flags(task);

#ifdef CONFIG_X86_64
        case offsetof(struct user_regs_struct, fs_base): {
                /*
                 * do_arch_prctl may have used a GDT slot instead of
                 * the MSR. To userland, it appears the same either
                 * way, except the %fs segment selector might not be 0.
                 */
                unsigned int seg = task->thread.fsindex;
                if (task->thread.fs != 0)
                        return task->thread.fs;
                if (task == current)
                        asm("movl %%fs,%0" : "=r" (seg));
                if (seg != FS_TLS_SEL)
                        return 0;
                return get_desc_base(&task->thread.tls_array[FS_TLS]);
        }
        case offsetof(struct user_regs_struct, gs_base): {
                /*
                 * Exactly the same here as the %fs handling above.
                 */
                unsigned int seg = task->thread.gsindex;
                if (task->thread.gs != 0)
                        return task->thread.gs;
                if (task == current)
                        asm("movl %%gs,%0" : "=r" (seg));
                if (seg != GS_TLS_SEL)
                        return 0;
                return get_desc_base(&task->thread.tls_array[GS_TLS]);
        }
#endif
        }

        return *pt_regs_access(task_pt_regs(task), offset);
}

/*
 * This function is trivial and will be inlined by the compiler.
 * Having it separates the implementation details of debug
 * registers from the interface details of ptrace.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
{
        switch (n) {
        case 0: return child->thread.debugreg0;
        case 1: return child->thread.debugreg1;
        case 2: return child->thread.debugreg2;
        case 3: return child->thread.debugreg3;
        case 6: return child->thread.debugreg6;
        case 7: return child->thread.debugreg7;
        }
        return 0;
}

static int ptrace_set_debugreg(struct task_struct *child,
                               int n, unsigned long data)
{
        int i;

        if (unlikely(n == 4 || n == 5))
                return -EIO;

        if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
                return -EIO;

        switch (n) {
        case 0: child->thread.debugreg0 = data; break;
        case 1: child->thread.debugreg1 = data; break;
        case 2: child->thread.debugreg2 = data; break;
        case 3: child->thread.debugreg3 = data; break;

        case 6:
                if ((data & ~0xffffffffUL) != 0)
                        return -EIO;
                child->thread.debugreg6 = data;
                break;

        case 7:
                /*
                 * Sanity-check data. Take one half-byte at once with
                 * check = (val >> (16 + 4*i)) & 0xf. It contains the
                 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
                 * 2 and 3 are LENi. Given a list of invalid values,
                 * we do mask |= 1 << invalid_value, so that
                 * (mask >> check) & 1 is a correct test for invalid
                 * values.
                 *
                 * R/Wi contains the type of the breakpoint /
                 * watchpoint, LENi contains the length of the watched
                 * data in the watchpoint case.
                 *
                 * The invalid values are:
                 * - LENi == 0b10 (undefined), so mask |= 0x0f00. [32-bit]
                 * - R/Wi == 0b10 (break on I/O reads or writes), so
                 *   mask |= 0x4444.
                 * - R/Wi == 0b00 && LENi != 0b00, so we have mask |=
                 *   0x1110.
                 *
                 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
                 *
                 * See the Intel Manual "System Programming Guide",
                 * 15.2.4
                 *
                 * Note that LENi == 0b10 is defined on x86_64 in long
                 * mode (i.e. even for 32-bit userspace software, but
                 * 64-bit kernel), so the x86_64 mask value is 0x5554.
                 * See the AMD manual no. 24593 (AMD64 System Programming)
                 */
#ifdef CONFIG_X86_32
#define DR7_MASK        0x5f54
#else
#define DR7_MASK        0x5554
#endif
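                /*
                 * Worked example: programming breakpoint i as a 4-byte
                 * data watchpoint gives R/Wi = 0b11 and LENi = 0b11, so
                 * check = 0xf and (DR7_MASK >> 0xf) & 1 == 0 -- accepted.
                 * Requesting an I/O breakpoint (R/Wi = 0b10) hits a set
                 * mask bit and the whole write fails with -EIO.
                 */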
                data &= ~DR_CONTROL_RESERVED;
                for (i = 0; i < 4; i++)
                        if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
                                return -EIO;
                child->thread.debugreg7 = data;
                if (data)
                        set_tsk_thread_flag(child, TIF_DEBUG);
                else
                        clear_tsk_thread_flag(child, TIF_DEBUG);
                break;
        }

        return 0;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
        user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
        clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}

481bed45 471long arch_ptrace(struct task_struct *child, long request, long addr, long data)
1da177e4 472{
1da177e4
LT
473 int i, ret;
474 unsigned long __user *datap = (unsigned long __user *)data;
475
1da177e4
LT
476 switch (request) {
477 /* when I and D space are separate, these will need to be fixed. */
9e714bed 478 case PTRACE_PEEKTEXT: /* read word at location addr. */
76647323
AD
479 case PTRACE_PEEKDATA:
480 ret = generic_ptrace_peekdata(child, addr, data);
1da177e4 481 break;
1da177e4
LT
482
483 /* read the word at location addr in the USER area. */
484 case PTRACE_PEEKUSR: {
485 unsigned long tmp;
486
487 ret = -EIO;
e9c86c78
RM
488 if ((addr & (sizeof(data) - 1)) || addr < 0 ||
489 addr >= sizeof(struct user))
1da177e4
LT
490 break;
491
492 tmp = 0; /* Default return condition */
e9c86c78 493 if (addr < sizeof(struct user_regs_struct))
1da177e4 494 tmp = getreg(child, addr);
e9c86c78
RM
495 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
496 addr <= offsetof(struct user, u_debugreg[7])) {
497 addr -= offsetof(struct user, u_debugreg[0]);
498 tmp = ptrace_get_debugreg(child, addr / sizeof(data));
1da177e4
LT
499 }
500 ret = put_user(tmp, datap);
501 break;
502 }
503
        /* when I and D space are separate, this will have to be fixed. */
        case PTRACE_POKETEXT: /* write the word at location addr. */
        case PTRACE_POKEDATA:
                ret = generic_ptrace_pokedata(child, addr, data);
                break;

        case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
                ret = -EIO;
                if ((addr & (sizeof(data) - 1)) || addr < 0 ||
                    addr >= sizeof(struct user))
                        break;

                if (addr < sizeof(struct user_regs_struct))
                        ret = putreg(child, addr, data);
                else if (addr >= offsetof(struct user, u_debugreg[0]) &&
                         addr <= offsetof(struct user, u_debugreg[7])) {
                        addr -= offsetof(struct user, u_debugreg[0]);
                        ret = ptrace_set_debugreg(child,
                                                  addr / sizeof(data), data);
                }
                break;

        case PTRACE_GETREGS: { /* Get all gp regs from the child. */
                if (!access_ok(VERIFY_WRITE, datap, sizeof(struct user_regs_struct))) {
                        ret = -EIO;
                        break;
                }
                for (i = 0; i < sizeof(struct user_regs_struct); i += sizeof(long)) {
                        __put_user(getreg(child, i), datap);
                        datap++;
                }
                ret = 0;
                break;
        }

        case PTRACE_SETREGS: { /* Set all gp regs in the child. */
                unsigned long tmp;
                if (!access_ok(VERIFY_READ, datap, sizeof(struct user_regs_struct))) {
                        ret = -EIO;
                        break;
                }
                for (i = 0; i < sizeof(struct user_regs_struct); i += sizeof(long)) {
                        __get_user(tmp, datap);
                        putreg(child, i, tmp);
                        datap++;
                }
                ret = 0;
                break;
        }

        case PTRACE_GETFPREGS: { /* Get the child FPU state. */
                if (!access_ok(VERIFY_WRITE, datap,
                               sizeof(struct user_i387_struct))) {
                        ret = -EIO;
                        break;
                }
                ret = 0;
                if (!tsk_used_math(child))
                        init_fpu(child);
                get_fpregs((struct user_i387_struct __user *)data, child);
                break;
        }

        case PTRACE_SETFPREGS: { /* Set the child FPU state. */
                if (!access_ok(VERIFY_READ, datap,
                               sizeof(struct user_i387_struct))) {
                        ret = -EIO;
                        break;
                }
                set_stopped_child_used_math(child);
                set_fpregs(child, (struct user_i387_struct __user *)data);
                ret = 0;
                break;
        }

#ifdef CONFIG_X86_32
        case PTRACE_GETFPXREGS: { /* Get the child extended FPU state. */
                if (!access_ok(VERIFY_WRITE, datap,
                               sizeof(struct user_fxsr_struct))) {
                        ret = -EIO;
                        break;
                }
                if (!tsk_used_math(child))
                        init_fpu(child);
                ret = get_fpxregs((struct user_fxsr_struct __user *)data, child);
                break;
        }

        case PTRACE_SETFPXREGS: { /* Set the child extended FPU state. */
                if (!access_ok(VERIFY_READ, datap,
                               sizeof(struct user_fxsr_struct))) {
                        ret = -EIO;
                        break;
                }
                set_stopped_child_used_math(child);
                ret = set_fpxregs(child, (struct user_fxsr_struct __user *)data);
                break;
        }
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        case PTRACE_GET_THREAD_AREA:
                if (addr < 0)
                        return -EIO;
                ret = do_get_thread_area(child, addr,
                                         (struct user_desc __user *) data);
                break;

        case PTRACE_SET_THREAD_AREA:
                if (addr < 0)
                        return -EIO;
                ret = do_set_thread_area(child, addr,
                                         (struct user_desc __user *) data, 0);
                break;
#endif

#ifdef CONFIG_X86_64
        /* normal 64bit interface to access TLS data.
           Works just like arch_prctl, except that the arguments
           are reversed. */
        case PTRACE_ARCH_PRCTL:
                ret = do_arch_prctl(child, data, addr);
                break;
#endif

        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }

        return ret;
}

#ifdef CONFIG_IA32_EMULATION

#include <asm/user32.h>

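/*
 * R32/SEG32 map an offset into the 32-bit struct user32 (the layout a
 * 32-bit debugger uses) onto the corresponding 64-bit pt_regs field or
 * onto the segment-register helpers above.
 */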
#define R32(l,q)                                                        \
        case offsetof(struct user32, regs.l):                           \
                regs->q = value; break

#define SEG32(rs)                                                       \
        case offsetof(struct user32, regs.rs):                          \
                return set_segment_reg(child,                           \
                                       offsetof(struct user_regs_struct, rs), \
                                       value);                          \
                break

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
        struct pt_regs *regs = task_pt_regs(child);

        switch (regno) {

        SEG32(cs);
        SEG32(ds);
        SEG32(es);
        SEG32(fs);
        SEG32(gs);
        SEG32(ss);

        R32(ebx, bx);
        R32(ecx, cx);
        R32(edx, dx);
        R32(edi, di);
        R32(esi, si);
        R32(ebp, bp);
        R32(eax, ax);
        R32(orig_eax, orig_ax);
        R32(eip, ip);
        R32(esp, sp);

        case offsetof(struct user32, regs.eflags):
                return set_flags(child, value);

        case offsetof(struct user32, u_debugreg[0]) ...
             offsetof(struct user32, u_debugreg[7]):
                regno -= offsetof(struct user32, u_debugreg[0]);
                return ptrace_set_debugreg(child, regno / 4, value);

        default:
                if (regno > sizeof(struct user32) || (regno & 3))
                        return -EIO;

                /*
                 * Other dummy fields in the virtual user structure
                 * are ignored
                 */
                break;
        }
        return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)                                                        \
        case offsetof(struct user32, regs.l):                           \
                *val = regs->q; break

#define SEG32(rs)                                                       \
        case offsetof(struct user32, regs.rs):                          \
                *val = get_segment_reg(child,                           \
                                       offsetof(struct user_regs_struct, rs)); \
                break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
        struct pt_regs *regs = task_pt_regs(child);

        switch (regno) {

        SEG32(ds);
        SEG32(es);
        SEG32(fs);
        SEG32(gs);

        R32(cs, cs);
        R32(ss, ss);
        R32(ebx, bx);
        R32(ecx, cx);
        R32(edx, dx);
        R32(edi, di);
        R32(esi, si);
        R32(ebp, bp);
        R32(eax, ax);
        R32(orig_eax, orig_ax);
        R32(eip, ip);
        R32(esp, sp);

        case offsetof(struct user32, regs.eflags):
                *val = get_flags(child);
                break;

        case offsetof(struct user32, u_debugreg[0]) ...
             offsetof(struct user32, u_debugreg[7]):
                regno -= offsetof(struct user32, u_debugreg[0]);
                *val = ptrace_get_debugreg(child, regno / 4);
                break;

        default:
                if (regno > sizeof(struct user32) || (regno & 3))
                        return -EIO;

                /*
                 * Other dummy fields in the virtual user structure
                 * are ignored
                 */
                *val = 0;
                break;
        }
        return 0;
}

#undef R32
#undef SEG32

#endif /* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_32

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
{
        struct siginfo info;

        tsk->thread.trap_no = 1;
        tsk->thread.error_code = error_code;

        memset(&info, 0, sizeof(info));
        info.si_signo = SIGTRAP;
        info.si_code = TRAP_BRKPT;

        /* User-mode ip? */
        info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;

        /* Send us the fake SIGTRAP */
        force_sig_info(SIGTRAP, &info, tsk);
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
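/*
 * The return value tells the caller whether the system call itself
 * should be skipped; this is how PTRACE_SYSEMU is implemented, and the
 * 32-bit entry code is assumed to check it on the entry path.
 */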
__attribute__((regparm(3)))
int do_syscall_trace(struct pt_regs *regs, int entryexit)
{
        int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
        /*
         * With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for
         * syscall interception
         */
        int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);
        int ret = 0;

        /* do the secure computing check first */
        if (!entryexit)
                secure_computing(regs->orig_ax);

        if (unlikely(current->audit_context)) {
                if (entryexit)
                        audit_syscall_exit(AUDITSC_RESULT(regs->ax),
                                           regs->ax);
                /* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
                 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
                 * not used, entry.S will call us only on syscall exit, not
                 * entry; so when TIF_SYSCALL_AUDIT is used we must avoid
                 * calling send_sigtrap() on syscall entry.
                 *
                 * Note that when PTRACE_SYSEMU_SINGLESTEP is used,
                 * is_singlestep is false, despite its name, so we will still do
                 * the correct thing.
                 */
                else if (is_singlestep)
                        goto out;
        }

        if (!(current->ptrace & PT_PTRACED))
                goto out;

        /* If a process stops on the 1st tracepoint with SYSCALL_TRACE
         * and then is resumed with SYSEMU_SINGLESTEP, it will come in
         * here. We have to check this and return */
        if (is_sysemu && entryexit)
                return 0;

        /* Fake a debug trap */
        if (is_singlestep)
                send_sigtrap(current, regs, 0);

        if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
                goto out;

        /* the 0x80 provides a way for the tracing parent to distinguish
           between a syscall stop and SIGTRAP delivery */
        /* Note that the debugger could change the result of test_thread_flag! */
        ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

        /*
         * this isn't the same as continuing with a signal, but it will do
         * for normal use. strace only continues with a signal if the
         * stopping signal is not SIGTRAP. -brl
         */
        if (current->exit_code) {
                send_sig(current->exit_code, current, 1);
                current->exit_code = 0;
        }
        ret = is_sysemu;
out:
        if (unlikely(current->audit_context) && !entryexit)
                audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_ax,
                                    regs->bx, regs->cx, regs->dx, regs->si);
        if (ret == 0)
                return 0;

        regs->orig_ax = -1; /* force skip of syscall restarting */
        if (unlikely(current->audit_context))
                audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
        return 1;
}

#else /* CONFIG_X86_64 */

static void syscall_trace(struct pt_regs *regs)
{

#if 0
        printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n",
               current->comm,
               regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0),
               current_thread_info()->flags, current->ptrace);
#endif

        ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
                                 ? 0x80 : 0));
        /*
         * this isn't the same as continuing with a signal, but it will do
         * for normal use. strace only continues with a signal if the
         * stopping signal is not SIGTRAP. -brl
         */
        if (current->exit_code) {
                send_sig(current->exit_code, current, 1);
                current->exit_code = 0;
        }
}

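/*
 * syscall_trace_enter()/syscall_trace_leave() are assumed to be called
 * from the 64-bit syscall entry/exit assembly when tracing, auditing,
 * or single-stepping work is pending for the current task.
 */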
asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
        /* do the secure computing check first */
        secure_computing(regs->orig_ax);

        if (test_thread_flag(TIF_SYSCALL_TRACE)
            && (current->ptrace & PT_PTRACED))
                syscall_trace(regs);

        if (unlikely(current->audit_context)) {
                if (test_thread_flag(TIF_IA32)) {
                        audit_syscall_entry(AUDIT_ARCH_I386,
                                            regs->orig_ax,
                                            regs->bx, regs->cx,
                                            regs->dx, regs->si);
                } else {
                        audit_syscall_entry(AUDIT_ARCH_X86_64,
                                            regs->orig_ax,
                                            regs->di, regs->si,
                                            regs->dx, regs->r10);
                }
        }
}

asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
        if (unlikely(current->audit_context))
                audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

        if ((test_thread_flag(TIF_SYSCALL_TRACE)
             || test_thread_flag(TIF_SINGLESTEP))
            && (current->ptrace & PT_PTRACED))
                syscall_trace(regs);
}

#endif /* CONFIG_X86_32 */