Commit | Line | Data |
---|---|---|
40e084a5 RB |
1 | #include <linux/highmem.h> |
2 | #include <linux/kdebug.h> | |
3 | #include <linux/types.h> | |
4 | #include <linux/notifier.h> | |
5 | #include <linux/sched.h> | |
6 | #include <linux/uprobes.h> | |
7 | ||
8 | #include <asm/branch.h> | |
9 | #include <asm/cpu-features.h> | |
10 | #include <asm/ptrace.h> | |
11 | #include <asm/inst.h> | |
12 | ||
/*
 * insn_has_delay_slot - test whether @insn is a branch/jump with a delay slot.
 *
 * Returns 1 for classic MIPS jumps and branches (the following word
 * executes in the branch delay slot), 0 otherwise.  A probed branch
 * cannot simply be single-stepped out of line, so the uprobes code
 * needs to know this to handle the delay-slot instruction specially.
 */
static inline int insn_has_delay_slot(const union mips_instruction insn)
{
	switch (insn.i_format.opcode) {
	/*
	 * jr and jalr are in r_format format.
	 */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
		case jr_op:
			return 1;
		}
		break;

	/*
	 * This group contains:
	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
	 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
		case bgez_op:
		case bgezl_op:
		case bltzal_op:
		case bltzall_op:
		case bgezal_op:
		case bgezall_op:
		case bposge32_op:	/* DSP: branch on pos >= 32 */
			return 1;
		}
		break;

	/*
	 * j/jal are j_format; the rest of this group are conditional
	 * branches in i_format (blez/bgtz only use the rs field).
	 */
	case jal_op:
	case j_op:
	case beq_op:
	case beql_op:
	case bne_op:
	case bnel_op:
	case blez_op: /* not really i_format */
	case blezl_op:
	case bgtz_op:
	case bgtzl_op:
		return 1;

	/*
	 * And now the FPA/cp1 branch instructions.
	 */
	case cop1_op:
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	case lwc2_op: /* This is bbit0 on Octeon */
	case ldc2_op: /* This is bbit032 on Octeon */
	case swc2_op: /* This is bbit1 on Octeon */
	case sdc2_op: /* This is bbit132 on Octeon */
#endif
		return 1;
	}

	return 0;
}
77 | ||
78 | /** | |
79 | * arch_uprobe_analyze_insn - instruction analysis including validity and fixups. | |
80 | * @mm: the probed address space. | |
81 | * @arch_uprobe: the probepoint information. | |
82 | * @addr: virtual address at which to install the probepoint | |
83 | * Return 0 on success or a -ve number on error. | |
84 | */ | |
85 | int arch_uprobe_analyze_insn(struct arch_uprobe *aup, | |
86 | struct mm_struct *mm, unsigned long addr) | |
87 | { | |
88 | union mips_instruction inst; | |
89 | ||
90 | /* | |
91 | * For the time being this also blocks attempts to use uprobes with | |
92 | * MIPS16 and microMIPS. | |
93 | */ | |
94 | if (addr & 0x03) | |
95 | return -EINVAL; | |
96 | ||
97 | inst.word = aup->insn[0]; | |
98 | aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)]; | |
99 | aup->ixol[1] = UPROBE_BRK_UPROBE_XOL; /* NOP */ | |
100 | ||
101 | return 0; | |
102 | } | |
103 | ||
/**
 * is_trap_insn - check if the instruction is a trap variant
 * @insn: instruction to be checked.
 * Returns true if @insn is a trap variant.
 *
 * This definition overrides the weak definition in kernel/events/uprobes.c.
 * and is needed for the case where an architecture has multiple trap
 * instructions (like PowerPC or MIPS). We treat BREAK just like the more
 * modern conditional trap instructions.
 */
bool is_trap_insn(uprobe_opcode_t *insn)
{
	union mips_instruction inst;

	inst.word = *insn;

	switch (inst.i_format.opcode) {
	/* BREAK and the register-compare traps are r_format. */
	case spec_op:
		switch (inst.r_format.func) {
		case break_op:
		case teq_op:
		case tge_op:
		case tgeu_op:
		case tlt_op:
		case tltu_op:
		case tne_op:
			return 1;
		}
		break;

	case bcond_op:	/* Yes, really ... */
		/* Immediate trap variants are encoded in the rt field. */
		switch (inst.u_format.rt) {
		case teqi_op:
		case tgei_op:
		case tgeiu_op:
		case tlti_op:
		case tltiu_op:
		case tnei_op:
			return 1;
		}
		break;
	}

	return 0;
}
149 | ||
150 | #define UPROBE_TRAP_NR ULONG_MAX | |
151 | ||
152 | /* | |
153 | * arch_uprobe_pre_xol - prepare to execute out of line. | |
154 | * @auprobe: the probepoint information. | |
155 | * @regs: reflects the saved user state of current task. | |
156 | */ | |
157 | int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) | |
158 | { | |
159 | struct uprobe_task *utask = current->utask; | |
160 | union mips_instruction insn; | |
161 | ||
162 | /* | |
163 | * Now find the EPC where to resume after the breakpoint has been | |
164 | * dealt with. This may require emulation of a branch. | |
165 | */ | |
166 | aup->resume_epc = regs->cp0_epc + 4; | |
167 | if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) { | |
168 | unsigned long epc; | |
169 | ||
170 | epc = regs->cp0_epc; | |
171 | __compute_return_epc_for_insn(regs, insn); | |
172 | aup->resume_epc = regs->cp0_epc; | |
173 | } | |
174 | ||
175 | utask->autask.saved_trap_nr = current->thread.trap_nr; | |
176 | current->thread.trap_nr = UPROBE_TRAP_NR; | |
177 | regs->cp0_epc = current->utask->xol_vaddr; | |
178 | ||
179 | return 0; | |
180 | } | |
181 | ||
182 | int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs) | |
183 | { | |
184 | struct uprobe_task *utask = current->utask; | |
185 | ||
186 | current->thread.trap_nr = utask->autask.saved_trap_nr; | |
187 | regs->cp0_epc = aup->resume_epc; | |
188 | ||
189 | return 0; | |
190 | } | |
191 | ||
192 | /* | |
193 | * If xol insn itself traps and generates a signal(Say, | |
194 | * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped | |
195 | * instruction jumps back to its own address. It is assumed that anything | |
196 | * like do_page_fault/do_trap/etc sets thread.trap_nr != -1. | |
197 | * | |
198 | * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr, | |
199 | * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to | |
200 | * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol(). | |
201 | */ | |
202 | bool arch_uprobe_xol_was_trapped(struct task_struct *tsk) | |
203 | { | |
204 | if (tsk->thread.trap_nr != UPROBE_TRAP_NR) | |
205 | return true; | |
206 | ||
207 | return false; | |
208 | } | |
209 | ||
210 | int arch_uprobe_exception_notify(struct notifier_block *self, | |
211 | unsigned long val, void *data) | |
212 | { | |
213 | struct die_args *args = data; | |
214 | struct pt_regs *regs = args->regs; | |
215 | ||
216 | /* regs == NULL is a kernel bug */ | |
217 | if (WARN_ON(!regs)) | |
218 | return NOTIFY_DONE; | |
219 | ||
220 | /* We are only interested in userspace traps */ | |
221 | if (!user_mode(regs)) | |
222 | return NOTIFY_DONE; | |
223 | ||
224 | switch (val) { | |
225 | case DIE_BREAK: | |
226 | if (uprobe_pre_sstep_notifier(regs)) | |
227 | return NOTIFY_STOP; | |
228 | break; | |
229 | case DIE_UPROBE_XOL: | |
230 | if (uprobe_post_sstep_notifier(regs)) | |
231 | return NOTIFY_STOP; | |
232 | default: | |
233 | break; | |
234 | } | |
235 | ||
236 | return 0; | |
237 | } | |
238 | ||
239 | /* | |
240 | * This function gets called when XOL instruction either gets trapped or | |
241 | * the thread has a fatal signal. Reset the instruction pointer to its | |
242 | * probed address for the potential restart or for post mortem analysis. | |
243 | */ | |
244 | void arch_uprobe_abort_xol(struct arch_uprobe *aup, | |
245 | struct pt_regs *regs) | |
246 | { | |
247 | struct uprobe_task *utask = current->utask; | |
248 | ||
249 | instruction_pointer_set(regs, utask->vaddr); | |
250 | } | |
251 | ||
252 | unsigned long arch_uretprobe_hijack_return_addr( | |
253 | unsigned long trampoline_vaddr, struct pt_regs *regs) | |
254 | { | |
255 | unsigned long ra; | |
256 | ||
257 | ra = regs->regs[31]; | |
258 | ||
259 | /* Replace the return address with the trampoline address */ | |
260 | regs->regs[31] = ra; | |
261 | ||
262 | return ra; | |
263 | } | |
264 | ||
265 | /** | |
266 | * set_swbp - store breakpoint at a given address. | |
267 | * @auprobe: arch specific probepoint information. | |
268 | * @mm: the probed process address space. | |
269 | * @vaddr: the virtual address to insert the opcode. | |
270 | * | |
271 | * For mm @mm, store the breakpoint instruction at @vaddr. | |
272 | * Return 0 (success) or a negative errno. | |
273 | * | |
274 | * This version overrides the weak version in kernel/events/uprobes.c. | |
275 | * It is required to handle MIPS16 and microMIPS. | |
276 | */ | |
277 | int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, | |
278 | unsigned long vaddr) | |
279 | { | |
280 | return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN); | |
281 | } | |
282 | ||
/**
 * set_orig_insn - Restore the original instruction.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 *
 * This overrides the weak version in kernel/events/uprobes.c.
 */
int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
	unsigned long vaddr)
{
	/* Write back the first word of the saved original instruction. */
	return uprobe_write_opcode(mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->orig_inst[0].word);
}
300 | ||
301 | void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, | |
302 | void *src, unsigned long len) | |
303 | { | |
304 | void *kaddr; | |
305 | ||
306 | /* Initialize the slot */ | |
307 | kaddr = kmap_atomic(page); | |
308 | memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len); | |
309 | kunmap_atomic(kaddr); | |
310 | ||
311 | /* | |
312 | * The MIPS version of flush_icache_range will operate safely on | |
313 | * user space addresses and more importantly, it doesn't require a | |
314 | * VMA argument. | |
315 | */ | |
316 | flush_icache_range(vaddr, vaddr + len); | |
317 | } | |
318 | ||
/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 *
 * This overrides the weak version in kernel/events/uprobes.c.
 * On MIPS the EPC still points at the BREAK instruction itself, so the
 * instruction pointer needs no adjustment.
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}
331 | ||
332 | /* | |
333 | * See if the instruction can be emulated. | |
334 | * Returns true if instruction was emulated, false otherwise. | |
335 | * | |
336 | * For now we always emulate so this function just returns 0. | |
337 | */ | |
338 | bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) | |
339 | { | |
340 | return 0; | |
341 | } |