/*
 *  Kernel Probes (KProbes)
 *  arch/ia64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Intel Corporation, 2005
 *
 * 2005-Apr	Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
 *		<anil.s.keshavamurthy@intel.com> adapted from i386
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/moduleloader.h>

#include <asm/pgtable.h>
#include <asm/kdebug.h>
#include <asm/sections.h>
#include <asm/uaccess.h>

extern void jprobe_inst_return(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
enum instruction_type {A, I, M, F, B, L, X, u};

static enum instruction_type bundle_encoding[32][3] = {
	{ M, I, I },			/* template 0x00 */
	/* ... remaining 31 template encodings elided ... */
};
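/*
 * Note: an IA-64 bundle is 128 bits: a 5-bit template plus three 41-bit
 * instruction slots. The template selects which execution unit (M, I, F,
 * B, L/X) each slot is routed to; e.g. the MLX templates (0x04/0x05) use
 * slots 1 and 2 together for a single X-unit instruction carrying a
 * 64-bit immediate, which is why slot 1 gets special-cased below.
 */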
/*
 * In this function we check to see if the instruction
 * is an IP-relative instruction and update the kprobe
 * inst flag accordingly.
 */
static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
					      uint major_opcode,
					      unsigned long kprobe_inst,
					      struct kprobe *p)
{
	p->ainsn.inst_flag = 0;
	p->ainsn.target_br_reg = 0;

	/* Check for Break instruction
	 * Bits 37:40 Major opcode to be zero
	 * Bits 27:32 X6 to be zero
	 * Bits 32:35 X3 to be zero
	 */
	if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF))) {
		/* is a break instruction */
		p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
		return;
	}

	if (bundle_encoding[template][slot] == B) {
		switch (major_opcode) {
		case INDIRECT_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		case IP_RELATIVE_PREDICT_OPCODE:
		case IP_RELATIVE_BRANCH_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
			break;
		case IP_RELATIVE_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	} else if (bundle_encoding[template][slot] == X) {
		switch (major_opcode) {
		case LONG_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	}
}
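/*
 * The flags set above matter because the probed instruction is later
 * single-stepped from a copy in the insn slot (p->ainsn.insn), not from
 * its original address: any IP-relative target or branch register that
 * ends up pointing at the copy must be translated back to the original
 * address, which is what resume_execution() does with these flags.
 */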
/*
 * In this function we check to see if the instruction
 * on which we are inserting the kprobe is supported.
 * Returns 0 if supported
 * Returns -EINVAL if unsupported
 */
static int __kprobes unsupported_inst(uint template, uint slot,
				      uint major_opcode,
				      unsigned long kprobe_inst,
				      unsigned long addr)
{
	if (bundle_encoding[template][slot] == I) {
		switch (major_opcode) {
		case 0x0:	/* I_UNIT_MISC_OPCODE */
			/*
			 * Check for Integer speculation instruction
			 * - Bit 33-35 to be equal to 0x1
			 */
			if (((kprobe_inst >> 33) & 0x7) == 1) {
				printk(KERN_WARNING
					"Kprobes on speculation inst at <0x%lx> not supported\n",
					addr);
				return -EINVAL;
			}

			/*
			 * IP relative mov instruction
			 * - Bit 27-35 to be equal to 0x30
			 */
			if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
				printk(KERN_WARNING
					"Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
					addr);
				return -EINVAL;
			}
		}
	}
	return 0;
}
/*
 * In this function we check to see if the instruction
 * (qp) cmpx.crel.ctype p1,p2=r2,r3
 * on which we are inserting the kprobe is a cmp instruction
 * with ctype as unc.
 */
static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
					    uint major_opcode,
					    unsigned long kprobe_inst)
{
	cmp_inst_t cmp_inst;
	uint ctype_unc = 0;

	if (!((bundle_encoding[template][slot] == I) ||
	      (bundle_encoding[template][slot] == M)))
		goto out;

	if (!((major_opcode == 0xC) || (major_opcode == 0xD) ||
	      (major_opcode == 0xE)))
		goto out;

	cmp_inst.l = kprobe_inst;
	if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
		/* Integer compare - Register Register (A6 type) */
		if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
		    && (cmp_inst.f.c == 1))
			ctype_unc = 1;
	} else if ((cmp_inst.f.x2 == 2) || (cmp_inst.f.x2 == 3)) {
		/* Integer compare - Immediate Register (A8 type) */
		if ((cmp_inst.f.ta == 0) && (cmp_inst.f.c == 1))
			ctype_unc = 1;
	}
out:
	return ctype_unc;
}
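/*
 * Example (illustrative): for "(p0) cmp.eq.unc p1,p2=r2,r3" the unc
 * completer means both target predicates are written even when the
 * qualifying predicate is 0, so the instruction has an architectural
 * effect regardless of qp -- hence prepare_break_inst() below must not
 * copy its qp onto the break instruction.
 */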
/*
 * In this function we override the bundle with
 * the break instruction at the given slot.
 */
static void __kprobes prepare_break_inst(uint template, uint slot,
					 uint major_opcode,
					 unsigned long kprobe_inst,
					 struct kprobe *p)
{
	unsigned long break_inst = BREAK_INST;
	bundle_t *bundle = &p->opcode.bundle;

	/*
	 * Copy the original kprobe_inst qualifying predicate(qp)
	 * to the break instruction iff !is_cmp_ctype_unc_inst,
	 * because a cmp instruction with ctype equal to unc is a
	 * special instruction that always needs to be executed
	 * regardless of qp.
	 */
	if (!is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst))
		break_inst |= (0x3f & kprobe_inst);

	switch (slot) {
	case 0:
		bundle->quad0.slot0 = break_inst;
		break;
	case 1:
		bundle->quad0.slot1_p0 = break_inst;
		bundle->quad1.slot1_p1 = break_inst >> (64-46);
		break;
	case 2:
		bundle->quad1.slot2 = break_inst;
		break;
	}

	/*
	 * Update the instruction flag, so that we can
	 * emulate the instruction properly after we
	 * single step on the original instruction.
	 */
	update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
}
static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
		unsigned long *kprobe_inst, uint *major_opcode)
{
	unsigned long kprobe_inst_p0, kprobe_inst_p1;
	unsigned int template;

	template = bundle->quad0.template;

	switch (slot) {
	case 0:
		*major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad0.slot0;
		break;
	case 1:
		*major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
		kprobe_inst_p0 = bundle->quad0.slot1_p0;
		kprobe_inst_p1 = bundle->quad1.slot1_p1;
		*kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46));
		break;
	case 2:
		*major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad1.slot2;
		break;
	}
}
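/*
 * For slot 1 above: the 41-bit instruction straddles the two 64-bit
 * halves of the bundle -- its low 18 bits live in bits 46..63 of quad0
 * (slot1_p0) and its high 23 bits in bits 0..22 of quad1 (slot1_p1),
 * which is why the two parts are recombined with a shift of (64-46).
 */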
/* Returns non-zero if the addr is in the Interrupt Vector Table */
static int __kprobes in_ivt_functions(unsigned long addr)
{
	return (addr >= (unsigned long)__start_ivt_text
		&& addr < (unsigned long)__end_ivt_text);
}
static int __kprobes valid_kprobe_addr(int template, int slot,
				       unsigned long addr)
{
	if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
		printk(KERN_WARNING "Attempting to insert unaligned kprobe "
				"at 0x%lx\n", addr);
		return -EINVAL;
	}

	if (in_ivt_functions(addr)) {
		printk(KERN_WARNING "Kprobes can't be inserted inside "
				"IVT functions at 0x%lx\n", addr);
		return -EINVAL;
	}

	if (slot == 1 && bundle_encoding[template][1] != L) {
		printk(KERN_WARNING "Inserting kprobes on slot #1 "
		       "is not supported\n");
		return -EINVAL;
	}

	return 0;
}
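/*
 * The slot #1 rejection above has one exception, handled by the caller:
 * in an MLX bundle (bundle_encoding[template][1] == L) a probe address
 * with slot 1 refers to the single long L+X instruction, and
 * arch_prepare_kprobe() simply treats it as slot 2.
 */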
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p,
			struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
}
static void kretprobe_trampoline(void)
{
}
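/*
 * The empty function above is never meant to run as written: on ia64
 * the symbol resolves to a function descriptor (struct fnptr), and only
 * its ->ip entry point is used -- as the fake return address planted by
 * arch_prepare_kretprobe() and as the address where trampoline_p
 * (registered in arch_init_kprobes()) traps into
 * trampoline_probe_handler().
 */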
/*
 * At this point the target function has been tricked into
 * returning into our trampoline.  Lookup the associated instance
 * and then:
 *    - call the handler function
 *    - cleanup by marking the instance as unused
 *    - long jump back to the original return address
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		((struct fnptr *)kretprobe_trampoline)->ip;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
	regs->cr_iip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * pre_kprobes_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
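/*
 * Sketch of a hypothetical client (names are illustrative, not part of
 * this file): a return probe is registered by filling in a struct
 * kretprobe and pointing kp.addr at the probed function's entry, using
 * the same function-descriptor dance as arch_init_kprobes():
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "return to %p\n", ri->ret_addr);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler   = my_ret_handler,
 *		.maxactive = 4,
 *	};
 *
 *	my_rp.kp.addr = (kprobe_opcode_t *)
 *			((struct fnptr *)some_function)->ip;
 *	register_kretprobe(&my_rp);
 */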
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
				      struct pt_regs *regs)
{
	struct kretprobe_instance *ri;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->task = current;
		ri->ret_addr = (kprobe_opcode_t *)regs->b0;

		/* Replace the return addr with trampoline addr */
		regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;

		add_rp_inst(ri);
	} else {
		rp->nmissed++;
	}
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long) p->addr;
	unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
	unsigned long kprobe_inst = 0;
	unsigned int slot = addr & 0xf, template, major_opcode = 0;
	bundle_t *bundle;

	bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
	template = bundle->quad0.template;

	if (valid_kprobe_addr(template, slot, addr))
		return -EINVAL;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get kprobe_inst and major_opcode from the bundle */
	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);

	if (unsupported_inst(template, slot, major_opcode, kprobe_inst, addr))
		return -EINVAL;

	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
	memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));

	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);

	return 0;
}
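/*
 * After prepare_break_inst() the two copies serve different purposes:
 * p->opcode holds the bundle with the break patched in (copied over the
 * probed text by arch_arm_kprobe()), while p->ainsn.insn keeps the
 * unmodified original bundle, used both to single-step the displaced
 * instruction and to restore the text in arch_disarm_kprobe().
 */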
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long)p->addr;
	unsigned long arm_addr = addr & ~0xFULL;

	flush_icache_range((unsigned long)p->ainsn.insn,
			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
	memcpy((char *)arm_addr, &p->opcode, sizeof(kprobe_opcode_t));
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long)p->addr;
	unsigned long arm_addr = addr & ~0xFULL;

	/* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
	memcpy((char *) arm_addr, (char *) p->ainsn.insn,
			sizeof(kprobe_opcode_t));
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, 0);
	mutex_unlock(&kprobe_mutex);
}
/*
 * We are resuming execution after a single step fault, so the pt_regs
 * structure reflects the register state after we executed the instruction
 * located in the kprobe (p->ainsn.insn.bundle).  We still need to adjust
 * the ip to point back to the original probed address, handling the cases
 * where the relative IP address and/or a branch register needs fixing up.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
	unsigned long template;
	int slot = ((unsigned long)p->addr & 0xf);

	template = p->ainsn.insn->bundle.quad0.template;

	if (slot == 1 && bundle_encoding[template][1] == L)
		slot = 2;

	if (p->ainsn.inst_flag) {

		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
			/* Fix relative IP address */
			regs->cr_iip = (regs->cr_iip - bundle_addr) +
					resume_addr;
		}

		if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
			/*
			 * Fix target branch register, software convention is
			 * to use either b0 or b6 or b7, so just checking
			 * only those registers
			 */
			switch (p->ainsn.target_br_reg) {
			case 0:
				if ((regs->b0 == bundle_addr) ||
					(regs->b0 == bundle_addr + 0x10)) {
					regs->b0 = (regs->b0 - bundle_addr) +
						resume_addr;
				}
				break;
			case 6:
				if ((regs->b6 == bundle_addr) ||
					(regs->b6 == bundle_addr + 0x10)) {
					regs->b6 = (regs->b6 - bundle_addr) +
						resume_addr;
				}
				break;
			case 7:
				if ((regs->b7 == bundle_addr) ||
					(regs->b7 == bundle_addr + 0x10)) {
					regs->b7 = (regs->b7 - bundle_addr) +
						resume_addr;
				}
				break;
			}
		}
		goto turn_ss_off;
	}

	if (slot == 2) {
		if (regs->cr_iip == bundle_addr + 0x10) {
			regs->cr_iip = resume_addr + 0x10;
		}
	} else {
		if (regs->cr_iip == bundle_addr) {
			regs->cr_iip = resume_addr;
		}
	}

turn_ss_off:
	/* Turn off Single Step bit */
	ia64_psr(regs)->ss = 0;
}
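/*
 * In the checks above, 0x10 is the size of one 16-byte bundle: a value
 * of bundle_addr + 0x10 (the bundle following the single-stepped copy,
 * e.g. the return link written by a call, or cr.iip after falling
 * through) must be translated to resume_addr + 0x10 correspondingly.
 */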
static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle;
	unsigned long slot = (unsigned long)p->addr & 0xf;

	/* single step inline if break instruction */
	if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
		regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
	else
		regs->cr_iip = bundle_addr & ~0xFULL;

	if (slot > 2)
		slot = 0;

	ia64_psr(regs)->ri = slot;

	/* turn on single stepping */
	ia64_psr(regs)->ss = 1;
}
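/*
 * psr.ri selects which of the bundle's three slots executes next after
 * the rfi, and psr.ss arms the Single Step trap so we re-enter via
 * post_kprobes_handler() after exactly one instruction.
 */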
static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
{
	unsigned int slot = ia64_psr(regs)->ri;
	unsigned int template, major_opcode;
	unsigned long kprobe_inst;
	unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
	bundle_t bundle;

	memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
	template = bundle.quad0.template;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get Kprobe probe instruction at given slot */
	get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);

	/* For break instruction,
	 * Bits 37:40 Major opcode to be zero
	 * Bits 27:32 X6 to be zero
	 * Bits 32:35 X3 to be zero
	 */
	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
		/* Not a break instruction */
		return 0;
	}

	/* Is a break instruction */
	return 1;
}
static int __kprobes pre_kprobes_handler(struct die_args *args)
{
	struct kprobe *p;
	int ret = 0;
	struct pt_regs *regs = args->regs;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Handle recursion cases */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
			    (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
				ia64_psr(regs)->ss = 0;
				goto no_kprobe;
			}
			/* We have reentered the pre_kprobes_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_ss(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else if (args->err == __IA64_BREAK_JPROBE) {
			/*
			 * jprobe instrumented function just completed
			 */
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		} else if (!is_ia64_break_inst(regs)) {
			/* The breakpoint instruction was removed by
			 * another cpu right after we hit it; no further
			 * handling of this interrupt is appropriate
			 */
			ret = 1;
			goto no_kprobe;
		} else {
			/* Not our break */
			goto no_kprobe;
		}
	}

	p = get_kprobe(addr);
	if (!p) {
		if (!is_ia64_break_inst(regs)) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}

		/* Not one of our breaks, let the kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/*
		 * Our pre-handler is specifically requesting that we just
		 * do a return.  This is used for both the jprobe pre-handler
		 * and the kretprobe trampoline
		 */
		return 1;

ss_probe:
	prepare_ss(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
static int __kprobes post_kprobes_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();

out:
	preempt_enable_no_resched();
	return 1;
}
static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the instruction pointer points back to
		 * the probe address and allow the page fault handler
		 * to continue as a normal page fault.
		 */
		regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL;
		ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault; this could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (ia64_done_with_exception(regs))
			return 1;

		/*
		 * Let ia64_do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BREAK:
		/* err is break number from ia64_bad_break() */
		if (args->err == 0x80200 || args->err == 0x80300 || args->err == 0)
			if (pre_kprobes_handler(args))
				ret = NOTIFY_STOP;
		break;
	case DIE_FAULT:
		/* err is vector number from ia64_fault() */
		if (args->err == 36)
			if (post_kprobes_handler(args->regs))
				ret = NOTIFY_STOP;
		break;
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
			kprobes_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
	default:
		break;
	}
	return ret;
}
struct param_bsp_cfm {
	unsigned long ip;
	unsigned long *bsp;
	unsigned long cfm;
};

static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg)
{
	unsigned long ip;
	struct param_bsp_cfm *lp = arg;

	do {
		unw_get_ip(info, &ip);
		if (ip == 0)
			break;
		if (ip == lp->ip) {
			unw_get_bsp(info, (unsigned long *)&lp->bsp);
			unw_get_cfm(info, (unsigned long *)&lp->cfm);
			return;
		}
	} while (unw_unwind(info) >= 0);
	lp->bsp = NULL;
	lp->cfm = 0;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct param_bsp_cfm pa;
	int bytes;

	/*
	 * Callee owns the argument space and could overwrite it, e.g.
	 * tail call optimization. So to be absolutely safe
	 * we save the argument space before transferring control
	 * to the instrumented jprobe function, which runs in
	 * the process context.
	 */
	pa.ip = regs->cr_iip;
	unw_init_running(ia64_get_bsp_cfm, &pa);
	bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
				- (char *)pa.bsp;
	memcpy(kcb->jprobes_saved_stacked_regs, pa.bsp, bytes);
	kcb->bsp = pa.bsp;
	kcb->cfm = pa.cfm;

	/* save architectural state */
	kcb->jprobe_saved_regs = *regs;

	/* after rfi, execute the jprobe instrumented function */
	regs->cr_iip = addr & ~0xFULL;
	ia64_psr(regs)->ri = addr & 0xf;
	regs->r1 = ((struct fnptr *)(jp->entry))->gp;

	/*
	 * fix the return address to our jprobe_inst_return() function
	 * in the jprobes.S file
	 */
	regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;

	return 1;
}
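/*
 * The RSE arithmetic above: cfm & 0x3f is the current frame's sof (size
 * of frame), and ia64_rse_skip_regs(bsp, sof) advances the backing
 * store pointer past that many stacked registers (accounting for NaT
 * collection slots), so "bytes" is the size of the register frame being
 * saved here and restored in longjmp_break_handler() below.
 */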
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int bytes;

	/* restoring architectural state */
	*regs = kcb->jprobe_saved_regs;

	/* restoring the original argument space */
	flush_register_stack();
	bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
				- (char *)kcb->bsp;
	memcpy((char *)kcb->bsp, kcb->jprobes_saved_stacked_regs, bytes);
	invalidate_stacked_regs();

	preempt_enable_no_resched();
	return 1;
}
static struct kprobe trampoline_p = {
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	trampoline_p.addr =
		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip;
	return register_kprobe(&trampoline_p);
}