/*
 * Kernel Probes (KProbes)
 * arch/ia64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Intel Corporation, 2005
 *
 * 2005-Apr	Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
 *		<anil.s.keshavamurthy@intel.com> adapted from i386
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/moduleloader.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/uaccess.h>

extern void jprobe_inst_return(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

enum instruction_type {A, I, M, F, B, L, X, u};
static enum instruction_type bundle_encoding[32][3] = {
	{ M, I, I },	/* 00 */
	{ M, I, I },	/* 01 */
	{ M, I, I },	/* 02 */
	{ M, I, I },	/* 03 */
	{ M, L, X },	/* 04 */
	{ M, L, X },	/* 05 */
	{ u, u, u },	/* 06 */
	{ u, u, u },	/* 07 */
	{ M, M, I },	/* 08 */
	{ M, M, I },	/* 09 */
	{ M, M, I },	/* 0A */
	{ M, M, I },	/* 0B */
	{ M, F, I },	/* 0C */
	{ M, F, I },	/* 0D */
	{ M, M, F },	/* 0E */
	{ M, M, F },	/* 0F */
	{ M, I, B },	/* 10 */
	{ M, I, B },	/* 11 */
	{ M, B, B },	/* 12 */
	{ M, B, B },	/* 13 */
	{ u, u, u },	/* 14 */
	{ u, u, u },	/* 15 */
	{ B, B, B },	/* 16 */
	{ B, B, B },	/* 17 */
	{ M, M, B },	/* 18 */
	{ M, M, B },	/* 19 */
	{ u, u, u },	/* 1A */
	{ u, u, u },	/* 1B */
	{ M, F, B },	/* 1C */
	{ M, F, B },	/* 1D */
	{ u, u, u },	/* 1E */
	{ u, u, u },	/* 1F */
};
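
/*
 * Illustrative note (editor's sketch, not part of the original source):
 * the 5-bit template field in quad0 indexes this table to give the
 * execution unit type of each slot.  For example, template 0x12 decodes
 * as { M, B, B }, so a probe landing on slot 2 of such a bundle is known
 * to be a B-unit (branch) instruction:
 *
 *	uint template = bundle->quad0.template;		// e.g. 0x12
 *	enum instruction_type t = bundle_encoding[template][2];	// == B
 */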

/*
 * In this function we check to see if the instruction
 * is an IP-relative instruction and update the kprobe
 * inst flag accordingly.
 */
static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
					      uint major_opcode,
					      unsigned long kprobe_inst,
					      struct kprobe *p)
{
	p->ainsn.inst_flag = 0;
	p->ainsn.target_br_reg = 0;
	p->ainsn.slot = slot;

	/* Check for a Break instruction
	 * Bits 37:40 Major opcode to be zero
	 * Bits 27:32 X6 to be zero
	 * Bits 33:35 X3 to be zero
	 */
	if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF))) {
		/* is a break instruction */
		p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
		return;
	}

	if (bundle_encoding[template][slot] == B) {
		switch (major_opcode) {
		case INDIRECT_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		case IP_RELATIVE_PREDICT_OPCODE:
		case IP_RELATIVE_BRANCH_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
			break;
		case IP_RELATIVE_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	} else if (bundle_encoding[template][slot] == X) {
		switch (major_opcode) {
		case LONG_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	}
	return;
}
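
/*
 * Illustrative note (editor's sketch): for the B- and X-unit call
 * encodings handled above, the target branch register lives in bits
 * 6:8 of the 41-bit instruction, hence the (kprobe_inst >> 6) & 0x7
 * extraction.  E.g. for "br.call.sptk.many b6 = target" the field
 * holds 6, and resume_execution() later uses target_br_reg == 6 to
 * fix up regs->b6.
 */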

/*
 * In this function we check to see if the instruction
 *	(qp) cmpx.crel.ctype p1,p2=r2,r3
 * on which we are inserting the kprobe is a cmp instruction
 * with ctype as unc.
 */
static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
					    uint major_opcode,
					    unsigned long kprobe_inst)
{
	cmp_inst_t cmp_inst;
	uint ctype_unc = 0;

	if (!((bundle_encoding[template][slot] == I) ||
	      (bundle_encoding[template][slot] == M)))
		goto out;

	if (!((major_opcode == 0xC) || (major_opcode == 0xD) ||
	      (major_opcode == 0xE)))
		goto out;

	cmp_inst.l = kprobe_inst;
	if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
		/* Integer compare - Register Register (A6 type) */
		if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
		    && (cmp_inst.f.c == 1))
			ctype_unc = 1;
	} else if ((cmp_inst.f.x2 == 2) || (cmp_inst.f.x2 == 3)) {
		/* Integer compare - Immediate Register (A8 type) */
		if ((cmp_inst.f.ta == 0) && (cmp_inst.f.c == 1))
			ctype_unc = 1;
	}
out:
	return ctype_unc;
}

/*
 * In this function we check to see if the instruction
 * on which we are inserting the kprobe is supported.
 * Returns the qp value if supported
 * Returns -EINVAL if unsupported
 */
static int __kprobes unsupported_inst(uint template, uint slot,
				      uint major_opcode,
				      unsigned long kprobe_inst,
				      unsigned long addr)
{
	int qp;

	qp = kprobe_inst & 0x3f;
	if (is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) {
		if (slot == 1 && qp) {
			printk(KERN_WARNING "Kprobes on cmp unc "
					"instruction on slot 1 at <0x%lx> "
					"is not supported\n", addr);
			return -EINVAL;
		}
		qp = 0;
	} else if (bundle_encoding[template][slot] == I) {
		if (major_opcode == 0) {
			/*
			 * Check for Integer speculation instruction
			 * - Bit 33-35 to be equal to 0x1
			 */
			if (((kprobe_inst >> 33) & 0x7) == 1) {
				printk(KERN_WARNING
					"Kprobes on speculation inst at <0x%lx> not supported\n",
					addr);
				return -EINVAL;
			}
			/*
			 * IP relative mov instruction
			 * - Bit 27-35 to be equal to 0x30
			 */
			if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
				printk(KERN_WARNING
					"Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
					addr);
				return -EINVAL;
			}
		} else if ((major_opcode == 5) &&
			   !(kprobe_inst & (0xFUL << 33)) &&
			   (kprobe_inst & (0x1UL << 12))) {
			/* test bit instructions, tbit,tnat,tf
			 * bit 33-36 to be equal to 0
			 * bit 12 to be equal to 1
			 */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on test bit "
						"instruction on slot 1 at <0x%lx> "
						"is not supported\n", addr);
				return -EINVAL;
			}
			qp = 0;
		}
	} else if (bundle_encoding[template][slot] == B) {
		if (major_opcode == 7) {
			/* IP-Relative Predict major code is 7 */
			printk(KERN_WARNING "Kprobes on IP-Relative "
					"Predict is not supported\n");
			return -EINVAL;
		} else if (major_opcode == 2) {
			/* Indirect Predict, major code is 2
			 * bit 27-32 to be equal to 10 or 11
			 */
			int x6 = (kprobe_inst >> 27) & 0x3F;
			if ((x6 == 0x10) || (x6 == 0x11)) {
				printk(KERN_WARNING "Kprobes on "
					"Indirect Predict is not supported\n");
				return -EINVAL;
			}
		}
	}
	/* The kernel does not use floating-point instructions; to be safe,
	 * kprobes still checks for fcmp/fclass/floating-point approximation
	 * instructions here.
	 */
	else if (unlikely(bundle_encoding[template][slot] == F)) {
		if ((major_opcode == 4 || major_opcode == 5) &&
		    (kprobe_inst & (0x1 << 12))) {
			/* fcmp/fclass unc instruction */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on fcmp/fclass "
					"instruction on slot 1 at <0x%lx> "
					"is not supported\n", addr);
				return -EINVAL;
			}
			qp = 0;
		}
		if ((major_opcode == 0 || major_opcode == 1) &&
		    (kprobe_inst & (0x1UL << 33))) {
			/* floating-point approximation instruction */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on float approx "
					"instr at <0x%lx> is not supported\n",
					addr);
				return -EINVAL;
			}
			qp = 0;
		}
	}
	return qp;
}

/*
 * In this function we override the bundle with
 * the break instruction at the given slot.
 */
static void __kprobes prepare_break_inst(uint template, uint slot,
					 uint major_opcode,
					 unsigned long kprobe_inst,
					 struct kprobe *p,
					 int qp)
{
	unsigned long break_inst = BREAK_INST;
	bundle_t *bundle = &p->opcode.bundle;

	/*
	 * Copy the original kprobe_inst qualifying predicate (qp)
	 * to the break instruction
	 */
	break_inst |= qp;

	switch (slot) {
	case 0:
		bundle->quad0.slot0 = break_inst;
		break;
	case 1:
		bundle->quad0.slot1_p0 = break_inst;
		bundle->quad1.slot1_p1 = break_inst >> (64-46);
		break;
	case 2:
		bundle->quad1.slot2 = break_inst;
		break;
	}

	/*
	 * Update the instruction flag, so that we can
	 * emulate the instruction properly after we
	 * single step on the original instruction
	 */
	update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
}
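
/*
 * Illustrative note (editor's sketch): slot 1 of an IA-64 bundle
 * straddles the two 64-bit words.  quad0.slot1_p0 holds the low
 * 64 - 46 = 18 bits of the 41-bit instruction and quad1.slot1_p1
 * holds the remaining 23 bits, which is why prepare_break_inst()
 * stores break_inst and break_inst >> 18, and why get_kprobe_inst()
 * below reassembles the instruction as p0 | (p1 << 18).
 */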

static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
		unsigned long *kprobe_inst, uint *major_opcode)
{
	unsigned long kprobe_inst_p0, kprobe_inst_p1;
	unsigned int template;

	template = bundle->quad0.template;

	switch (slot) {
	case 0:
		*major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad0.slot0;
		break;
	case 1:
		*major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
		kprobe_inst_p0 = bundle->quad0.slot1_p0;
		kprobe_inst_p1 = bundle->quad1.slot1_p1;
		*kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46));
		break;
	case 2:
		*major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad1.slot2;
		break;
	}
}

/* Returns non-zero if the addr is in the Interrupt Vector Table */
static int __kprobes in_ivt_functions(unsigned long addr)
{
	return (addr >= (unsigned long)__start_ivt_text
		&& addr < (unsigned long)__end_ivt_text);
}

static int __kprobes valid_kprobe_addr(int template, int slot,
				       unsigned long addr)
{
	if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
		printk(KERN_WARNING "Attempting to insert unaligned kprobe "
				"at 0x%lx\n", addr);
		return -EINVAL;
	}

	if (in_ivt_functions(addr)) {
		printk(KERN_WARNING "Kprobes can't be inserted inside "
				"IVT functions at 0x%lx\n", addr);
		return -EINVAL;
	}

	return 0;
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p,
					 struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
}

static void kretprobe_trampoline(void)
{
}

/*
 * At this point the target function has been tricked into
 * returning into our trampoline.  Lookup the associated instance
 * and then:
 *    - call the handler function
 *    - cleanup by marking the instance as unused
 *    - long jump back to the original return address
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		((struct fnptr *)kretprobe_trampoline)->ip;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	regs->cr_iip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
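
/*
 * Usage sketch (editor's illustration; "my_ret_handler", "my_rp" and
 * "my_func_address" are invented names, not part of this file).  A
 * caller only fills in the generic kretprobe fields; the trampoline
 * bookkeeping above stays transparent to it:
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		// on ia64, r8 carries the integer return value
 *		printk(KERN_INFO "probed function returned 0x%lx\n",
 *		       regs->r8);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler = my_ret_handler,
 *		.kp.addr = (kprobe_opcode_t *)my_func_address,
 *	};
 *
 *	// in module init: register_kretprobe(&my_rp);
 */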

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->b0;

	/* Replace the return addr with trampoline addr */
	regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long) p->addr;
	unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
	unsigned long kprobe_inst = 0;
	unsigned int slot = addr & 0xf, template, major_opcode = 0;
	bundle_t *bundle;
	int qp;

	bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
	template = bundle->quad0.template;

	if (valid_kprobe_addr(template, slot, addr))
		return -EINVAL;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get kprobe_inst and major_opcode from the bundle */
	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);

	qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr);
	if (qp < 0)
		return -EINVAL;

	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
	memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));

	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);

	return 0;
}
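
/*
 * Usage sketch (editor's illustration; names are invented).  Nothing
 * calls arch_prepare_kprobe() directly; register_kprobe() does, so a
 * failed registration may mean the probed slot holds one of the
 * unsupported instructions rejected above:
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "hit %p, iip=0x%lx\n", kp->addr,
 *		       regs->cr_iip);
 *		return 0;	// 0: continue with the single-step
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.addr = (kprobe_opcode_t *)my_probe_address,
 *		.pre_handler = my_pre,
 *	};
 *
 *	// register_kprobe(&my_kp) returns -EINVAL for bad addresses/slots
 */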

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned long arm_addr;
	bundle_t *src, *dest;

	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
	src = &p->opcode.bundle;

	flush_icache_range((unsigned long)p->ainsn.insn,
			   (unsigned long)p->ainsn.insn +
			   sizeof(kprobe_opcode_t));
	switch (p->ainsn.slot) {
	case 0:
		dest->quad0.slot0 = src->quad0.slot0;
		break;
	case 1:
		dest->quad1.slot1_p1 = src->quad1.slot1_p1;
		break;
	case 2:
		dest->quad1.slot2 = src->quad1.slot2;
		break;
	}
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	unsigned long arm_addr;
	bundle_t *src, *dest;

	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
	/* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
	src = &p->ainsn.insn->bundle;
	switch (p->ainsn.slot) {
	case 0:
		dest->quad0.slot0 = src->quad0.slot0;
		break;
	case 1:
		dest->quad1.slot1_p1 = src->quad1.slot1_p1;
		break;
	case 2:
		dest->quad1.slot2 = src->quad1.slot2;
		break;
	}
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, 0);
	mutex_unlock(&kprobe_mutex);
}

/*
 * We are resuming execution after a single step fault, so the pt_regs
 * structure reflects the register state after we executed the instruction
 * located in the kprobe (p->ainsn.insn->bundle).  We still need to adjust
 * the IP so that it refers back to the original probe address instead of
 * the single-step copy, handling the cases where the relative IP address
 * and/or a branch register must be fixed up.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
	unsigned long template;
	int slot = ((unsigned long)p->addr & 0xf);

	template = p->ainsn.insn->bundle.quad0.template;

	if (slot == 1 && bundle_encoding[template][1] == L)
		slot = 2;

	if (p->ainsn.inst_flag) {

		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
			/* Fix relative IP address */
			regs->cr_iip = (regs->cr_iip - bundle_addr) +
					resume_addr;
		}

		if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
			/*
			 * Fix the target branch register; software convention
			 * is to use either b0, b6 or b7, so we only check
			 * those registers
			 */
			switch (p->ainsn.target_br_reg) {
			case 0:
				if ((regs->b0 == bundle_addr) ||
				    (regs->b0 == bundle_addr + 0x10)) {
					regs->b0 = (regs->b0 - bundle_addr) +
						resume_addr;
				}
				break;
			case 6:
				if ((regs->b6 == bundle_addr) ||
				    (regs->b6 == bundle_addr + 0x10)) {
					regs->b6 = (regs->b6 - bundle_addr) +
						resume_addr;
				}
				break;
			case 7:
				if ((regs->b7 == bundle_addr) ||
				    (regs->b7 == bundle_addr + 0x10)) {
					regs->b7 = (regs->b7 - bundle_addr) +
						resume_addr;
				}
				break;
			} /* end switch */
		}
		goto turn_ss_off;
	}

	if (slot == 2) {
		if (regs->cr_iip == bundle_addr + 0x10) {
			regs->cr_iip = resume_addr + 0x10;
		}
	} else {
		if (regs->cr_iip == bundle_addr) {
			regs->cr_iip = resume_addr;
		}
	}

turn_ss_off:
	/* Turn off Single Step bit */
	ia64_psr(regs)->ss = 0;
}
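
/*
 * Worked example (editor's illustration): if a copied "br.call" executed
 * from the single-step buffer, the CPU left the branch register pointing
 * after the copy, i.e. at bundle_addr or bundle_addr + 0x10.  The fixup
 * above rewrites it to the same offset from resume_addr, so the probed
 * function later returns to the instruction following the original
 * bundle rather than into the buffer.  The trailing cr_iip fixup handles
 * plain fall-through execution the same way.
 */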

static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle;
	unsigned long slot = (unsigned long)p->addr & 0xf;

	/* single step inline if break instruction */
	if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
		regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
	else
		regs->cr_iip = bundle_addr & ~0xFULL;

	if (slot > 2)
		slot = 0;

	ia64_psr(regs)->ri = slot;

	/* turn on single stepping */
	ia64_psr(regs)->ss = 1;
}

static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
{
	unsigned int slot = ia64_psr(regs)->ri;
	unsigned int template, major_opcode;
	unsigned long kprobe_inst;
	unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
	bundle_t bundle;

	memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
	template = bundle.quad0.template;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get the kprobe instruction at the given slot */
	get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);

	/* For a break instruction,
	 * Bits 37:40 Major opcode to be zero
	 * Bits 27:32 X6 to be zero
	 * Bits 33:35 X3 to be zero
	 */
	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
		/* Not a break instruction */
		return 0;
	}

	/* Is a break instruction */
	return 1;
}
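
/*
 * Illustrative note (editor's sketch): in a break instruction the
 * immediate occupies the low bits while the major opcode (bits 37:40),
 * x3 (bits 33:35) and x6 (bits 27:32) are all zero, so the nine-bit
 * test (kprobe_inst >> 27) & 0x1FF together with major_opcode == 0 is
 * enough to distinguish a break from any other instruction in the slot.
 */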

static int __kprobes pre_kprobes_handler(struct die_args *args)
{
	struct kprobe *p;
	int ret = 0;
	struct pt_regs *regs = args->regs;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Handle recursion cases */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
			    (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
				ia64_psr(regs)->ss = 0;
				goto no_kprobe;
			}
			/* We have reentered pre_kprobes_handler(), since
			 * another probe was hit while within the handler.
			 * Here we save the original kprobe state and just
			 * single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_ss(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else if (args->err == __IA64_BREAK_JPROBE) {
			/*
			 * jprobe instrumented function just completed
			 */
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		} else if (!is_ia64_break_inst(regs)) {
			/* The breakpoint instruction was removed by
			 * another cpu right after we hit it; no further
			 * handling of this interrupt is appropriate
			 */
			ret = 1;
			goto no_kprobe;
		} else {
			/* Not our break */
			goto no_kprobe;
		}
	}

	p = get_kprobe(addr);
	if (!p) {
		if (!is_ia64_break_inst(regs)) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}

		/* Not one of our breaks; let the kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/*
		 * Our pre-handler is specifically requesting that we just
		 * do a return.  This is used for both the jprobe pre-handler
		 * and the kretprobe trampoline
		 */
		return 1;

ss_probe:
	prepare_ss(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

static int __kprobes post_kprobes_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);

	/* Restore the previously saved kprobe state and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();

out:
	preempt_enable_no_resched();
	return 1;
}

static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe, point the instruction pointer back to the
		 * probe address, and allow the page fault handler
		 * to continue as a normal page fault.
		 */
		regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL;
		ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts can also be used for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault.  This could happen
		 * if the handler tries to access user space, e.g. by
		 * copy_from_user() or get_user().  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (ia64_done_with_exception(regs))
			return 1;

		/*
		 * Let ia64_do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}

int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BREAK:
		/* err is break number from ia64_bad_break() */
		if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
			|| args->err == __IA64_BREAK_JPROBE
			|| args->err == 0)
			if (pre_kprobes_handler(args))
				ret = NOTIFY_STOP;
		break;
	case DIE_FAULT:
		/* err is vector number from ia64_fault() */
		if (args->err == 36)
			if (post_kprobes_handler(args->regs))
				ret = NOTIFY_STOP;
		break;
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobes_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

struct param_bsp_cfm {
	unsigned long ip;
	unsigned long *bsp;
	unsigned long cfm;
};

static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg)
{
	unsigned long ip;
	struct param_bsp_cfm *lp = arg;

	do {
		unw_get_ip(info, &ip);
		if (ip == 0)
			break;
		if (ip == lp->ip) {
			unw_get_bsp(info, (unsigned long *)&lp->bsp);
			unw_get_cfm(info, (unsigned long *)&lp->cfm);
			return;
		}
	} while (unw_unwind(info) >= 0);
	lp->bsp = NULL;
	lp->cfm = 0;
	return;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct param_bsp_cfm pa;
	int bytes;

	/*
	 * The callee owns the argument space and could overwrite it, e.g.
	 * due to tail call optimization.  So to be absolutely safe
	 * we save the argument space before transferring control
	 * to the instrumented jprobe function, which runs in
	 * process context.
	 */
	pa.ip = regs->cr_iip;
	unw_init_running(ia64_get_bsp_cfm, &pa);
	bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
		- (char *)pa.bsp;
	memcpy(kcb->jprobes_saved_stacked_regs, pa.bsp, bytes);
	kcb->bsp = pa.bsp;
	kcb->cfm = pa.cfm;

	/* save architectural state */
	kcb->jprobe_saved_regs = *regs;

	/* after rfi, execute the jprobe instrumented function */
	regs->cr_iip = addr & ~0xFULL;
	ia64_psr(regs)->ri = addr & 0xf;
	regs->r1 = ((struct fnptr *)(jp->entry))->gp;

	/*
	 * fix the return address to our jprobe_inst_return() function
	 * in the jprobes.S file
	 */
	regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;

	return 1;
}
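
/*
 * Illustrative note (editor's sketch): ia64_rse_skip_regs(bsp, n)
 * advances a backing-store pointer by n slots, skipping the NaT
 * collection word the RSE stores every 64 slots, and the low bits of
 * cfm give sof, the size of the current register frame.  With sof == 3
 * and no intervening NaT word, bytes == 3 * 8 == 24: exactly the
 * stacked registers saved here and copied back by
 * longjmp_break_handler() below.
 */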

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int bytes;

	/* restore architectural state */
	*regs = kcb->jprobe_saved_regs;

	/* restore the original argument space */
	flush_register_stack();
	bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
		- (char *)kcb->bsp;
	memcpy(kcb->bsp, kcb->jprobes_saved_stacked_regs, bytes);
	invalidate_stacked_regs();

	preempt_enable_no_resched();
	return 1;
}

static struct kprobe trampoline_p = {
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	trampoline_p.addr =
		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip;
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr ==
		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip)
		return 1;

	return 0;
}