Commit | Line | Data |
---|---|---|
3d083395 SR |
1 | /* |
2 | * Code for replacing ftrace calls with jumps. | |
3 | * | |
4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | |
5 | * | |
6 | * Thanks goes to Ingo Molnar, for suggesting the idea. | |
7 | * Mathieu Desnoyers, for suggesting postponing the modifications. | |
8 | * Arjan van de Ven, for keeping me straight, and explaining to me | |
9 | * the dangers of modifying code on the run. | |
10 | */ | |
11 | ||
12 | #include <linux/spinlock.h> | |
13 | #include <linux/hardirq.h> | |
6f93fc07 | 14 | #include <linux/uaccess.h> |
3d083395 SR |
15 | #include <linux/ftrace.h> |
16 | #include <linux/percpu.h> | |
19b3e967 | 17 | #include <linux/sched.h> |
3d083395 SR |
18 | #include <linux/init.h> |
19 | #include <linux/list.h> | |
20 | ||
395a59d0 | 21 | #include <asm/ftrace.h> |
caf4b323 | 22 | #include <linux/ftrace.h> |
732f3ca7 | 23 | #include <asm/nops.h> |
caf4b323 | 24 | #include <asm/nmi.h> |
3d083395 | 25 | |
3d083395 | 26 | |
caf4b323 | 27 | #ifdef CONFIG_DYNAMIC_FTRACE |
3d083395 | 28 | |
3d083395 | 29 | union ftrace_code_union { |
395a59d0 | 30 | char code[MCOUNT_INSN_SIZE]; |
3d083395 SR |
31 | struct { |
32 | char e8; | |
33 | int offset; | |
34 | } __attribute__((packed)); | |
35 | }; | |
36 | ||
15adc048 | 37 | static int ftrace_calc_offset(long ip, long addr) |
3c1720f0 SR |
38 | { |
39 | return (int)(addr - ip); | |
40 | } | |
3d083395 | 41 | |
31e88909 | 42 | static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) |
3c1720f0 SR |
43 | { |
44 | static union ftrace_code_union calc; | |
3d083395 | 45 | |
3c1720f0 | 46 | calc.e8 = 0xe8; |
395a59d0 | 47 | calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); |
3c1720f0 SR |
48 | |
49 | /* | |
50 | * No locking needed, this must be called via kstop_machine | |
51 | * which in essence is like running on a uniprocessor machine. | |
52 | */ | |
53 | return calc.code; | |
3d083395 SR |
54 | } |
55 | ||
17666f02 SR |
56 | /* |
57 | * Modifying code must take extra care. On an SMP machine, if | |
58 | * the code being modified is also being executed on another CPU | |
59 | * that CPU will have undefined results and possibly take a GPF. | |
60 | * We use kstop_machine to stop other CPUS from exectuing code. | |
61 | * But this does not stop NMIs from happening. We still need | |
62 | * to protect against that. We separate out the modification of | |
63 | * the code to take care of this. | |
64 | * | |
65 | * Two buffers are added: An IP buffer and a "code" buffer. | |
66 | * | |
a26a2a27 | 67 | * 1) Put the instruction pointer into the IP buffer |
17666f02 SR |
68 | * and the new code into the "code" buffer. |
69 | * 2) Set a flag that says we are modifying code | |
70 | * 3) Wait for any running NMIs to finish. | |
71 | * 4) Write the code | |
72 | * 5) clear the flag. | |
73 | * 6) Wait for any running NMIs to finish. | |
74 | * | |
75 | * If an NMI is executed, the first thing it does is to call | |
76 | * "ftrace_nmi_enter". This will check if the flag is set to write | |
77 | * and if it is, it will write what is in the IP and "code" buffers. | |
78 | * | |
79 | * The trick is, it does not matter if everyone is writing the same | |
80 | * content to the code location. Also, if a CPU is executing code | |
81 | * it is OK to write to that code location if the contents being written | |
82 | * are the same as what exists. | |
83 | */ | |
84 | ||
a26a2a27 SR |
85 | static atomic_t in_nmi = ATOMIC_INIT(0); |
86 | static int mod_code_status; /* holds return value of text write */ | |
87 | static int mod_code_write; /* set when NMI should do the write */ | |
88 | static void *mod_code_ip; /* holds the IP to write to */ | |
89 | static void *mod_code_newcode; /* holds the text to write to the IP */ | |
17666f02 | 90 | |
a26a2a27 SR |
91 | static unsigned nmi_wait_count; |
92 | static atomic_t nmi_update_count = ATOMIC_INIT(0); | |
b807c3d0 SR |
93 | |
94 | int ftrace_arch_read_dyn_info(char *buf, int size) | |
95 | { | |
96 | int r; | |
97 | ||
98 | r = snprintf(buf, size, "%u %u", | |
99 | nmi_wait_count, | |
100 | atomic_read(&nmi_update_count)); | |
101 | return r; | |
102 | } | |
103 | ||
17666f02 SR |
104 | static void ftrace_mod_code(void) |
105 | { | |
106 | /* | |
107 | * Yes, more than one CPU process can be writing to mod_code_status. | |
108 | * (and the code itself) | |
109 | * But if one were to fail, then they all should, and if one were | |
110 | * to succeed, then they all should. | |
111 | */ | |
112 | mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode, | |
113 | MCOUNT_INSN_SIZE); | |
114 | ||
115 | } | |
116 | ||
117 | void ftrace_nmi_enter(void) | |
118 | { | |
119 | atomic_inc(&in_nmi); | |
120 | /* Must have in_nmi seen before reading write flag */ | |
121 | smp_mb(); | |
b807c3d0 | 122 | if (mod_code_write) { |
17666f02 | 123 | ftrace_mod_code(); |
b807c3d0 SR |
124 | atomic_inc(&nmi_update_count); |
125 | } | |
17666f02 SR |
126 | } |
127 | ||
128 | void ftrace_nmi_exit(void) | |
129 | { | |
130 | /* Finish all executions before clearing in_nmi */ | |
131 | smp_wmb(); | |
132 | atomic_dec(&in_nmi); | |
133 | } | |
134 | ||
135 | static void wait_for_nmi(void) | |
136 | { | |
b807c3d0 SR |
137 | int waited = 0; |
138 | ||
139 | while (atomic_read(&in_nmi)) { | |
140 | waited = 1; | |
17666f02 | 141 | cpu_relax(); |
b807c3d0 SR |
142 | } |
143 | ||
144 | if (waited) | |
145 | nmi_wait_count++; | |
17666f02 SR |
146 | } |
147 | ||
148 | static int | |
149 | do_ftrace_mod_code(unsigned long ip, void *new_code) | |
150 | { | |
151 | mod_code_ip = (void *)ip; | |
152 | mod_code_newcode = new_code; | |
153 | ||
154 | /* The buffers need to be visible before we let NMIs write them */ | |
155 | smp_wmb(); | |
156 | ||
157 | mod_code_write = 1; | |
158 | ||
159 | /* Make sure write bit is visible before we wait on NMIs */ | |
160 | smp_mb(); | |
161 | ||
162 | wait_for_nmi(); | |
163 | ||
164 | /* Make sure all running NMIs have finished before we write the code */ | |
165 | smp_mb(); | |
166 | ||
167 | ftrace_mod_code(); | |
168 | ||
169 | /* Make sure the write happens before clearing the bit */ | |
170 | smp_wmb(); | |
171 | ||
172 | mod_code_write = 0; | |
173 | ||
174 | /* make sure NMIs see the cleared bit */ | |
175 | smp_mb(); | |
176 | ||
177 | wait_for_nmi(); | |
178 | ||
179 | return mod_code_status; | |
180 | } | |
181 | ||
182 | ||
caf4b323 FW |
183 | |
184 | ||
185 | static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; | |
186 | ||
31e88909 | 187 | static unsigned char *ftrace_nop_replace(void) |
caf4b323 FW |
188 | { |
189 | return ftrace_nop; | |
190 | } | |
191 | ||
31e88909 | 192 | static int |
3d083395 SR |
193 | ftrace_modify_code(unsigned long ip, unsigned char *old_code, |
194 | unsigned char *new_code) | |
195 | { | |
6f93fc07 | 196 | unsigned char replaced[MCOUNT_INSN_SIZE]; |
3d083395 SR |
197 | |
198 | /* | |
199 | * Note: Due to modules and __init, code can | |
200 | * disappear and change, we need to protect against faulting | |
76aefee5 | 201 | * as well as code changing. We do this by using the |
ab9a0918 | 202 | * probe_kernel_* functions. |
3d083395 SR |
203 | * |
204 | * No real locking needed, this code is run through | |
6f93fc07 | 205 | * kstop_machine, or before SMP starts. |
3d083395 | 206 | */ |
76aefee5 SR |
207 | |
208 | /* read the text we want to modify */ | |
ab9a0918 | 209 | if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) |
593eb8a2 | 210 | return -EFAULT; |
6f93fc07 | 211 | |
76aefee5 | 212 | /* Make sure it is what we expect it to be */ |
6f93fc07 | 213 | if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) |
593eb8a2 | 214 | return -EINVAL; |
3d083395 | 215 | |
76aefee5 | 216 | /* replace the text with the new text */ |
17666f02 | 217 | if (do_ftrace_mod_code(ip, new_code)) |
593eb8a2 | 218 | return -EPERM; |
6f93fc07 SR |
219 | |
220 | sync_core(); | |
3d083395 | 221 | |
6f93fc07 | 222 | return 0; |
3d083395 SR |
223 | } |
224 | ||
31e88909 SR |
225 | int ftrace_make_nop(struct module *mod, |
226 | struct dyn_ftrace *rec, unsigned long addr) | |
227 | { | |
228 | unsigned char *new, *old; | |
229 | unsigned long ip = rec->ip; | |
230 | ||
231 | old = ftrace_call_replace(ip, addr); | |
232 | new = ftrace_nop_replace(); | |
233 | ||
234 | return ftrace_modify_code(rec->ip, old, new); | |
235 | } | |
236 | ||
237 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |
238 | { | |
239 | unsigned char *new, *old; | |
240 | unsigned long ip = rec->ip; | |
241 | ||
242 | old = ftrace_nop_replace(); | |
243 | new = ftrace_call_replace(ip, addr); | |
244 | ||
245 | return ftrace_modify_code(rec->ip, old, new); | |
246 | } | |
247 | ||
15adc048 | 248 | int ftrace_update_ftrace_func(ftrace_func_t func) |
d61f82d0 SR |
249 | { |
250 | unsigned long ip = (unsigned long)(&ftrace_call); | |
395a59d0 | 251 | unsigned char old[MCOUNT_INSN_SIZE], *new; |
d61f82d0 SR |
252 | int ret; |
253 | ||
395a59d0 | 254 | memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); |
d61f82d0 SR |
255 | new = ftrace_call_replace(ip, (unsigned long)func); |
256 | ret = ftrace_modify_code(ip, old, new); | |
257 | ||
258 | return ret; | |
259 | } | |
260 | ||
d61f82d0 | 261 | int __init ftrace_dyn_arch_init(void *data) |
3d083395 | 262 | { |
732f3ca7 SR |
263 | extern const unsigned char ftrace_test_p6nop[]; |
264 | extern const unsigned char ftrace_test_nop5[]; | |
265 | extern const unsigned char ftrace_test_jmp[]; | |
266 | int faulted = 0; | |
d61f82d0 | 267 | |
732f3ca7 SR |
268 | /* |
269 | * There is no good nop for all x86 archs. | |
270 | * We will default to using the P6_NOP5, but first we | |
271 | * will test to make sure that the nop will actually | |
272 | * work on this CPU. If it faults, we will then | |
273 | * go to a lesser efficient 5 byte nop. If that fails | |
274 | * we then just use a jmp as our nop. This isn't the most | |
275 | * efficient nop, but we can not use a multi part nop | |
276 | * since we would then risk being preempted in the middle | |
277 | * of that nop, and if we enabled tracing then, it might | |
278 | * cause a system crash. | |
279 | * | |
280 | * TODO: check the cpuid to determine the best nop. | |
281 | */ | |
282 | asm volatile ( | |
732f3ca7 SR |
283 | "ftrace_test_jmp:" |
284 | "jmp ftrace_test_p6nop\n" | |
8b27386a AK |
285 | "nop\n" |
286 | "nop\n" | |
287 | "nop\n" /* 2 byte jmp + 3 bytes */ | |
732f3ca7 SR |
288 | "ftrace_test_p6nop:" |
289 | P6_NOP5 | |
290 | "jmp 1f\n" | |
291 | "ftrace_test_nop5:" | |
292 | ".byte 0x66,0x66,0x66,0x66,0x90\n" | |
732f3ca7 SR |
293 | "1:" |
294 | ".section .fixup, \"ax\"\n" | |
295 | "2: movl $1, %0\n" | |
296 | " jmp ftrace_test_nop5\n" | |
297 | "3: movl $2, %0\n" | |
298 | " jmp 1b\n" | |
299 | ".previous\n" | |
300 | _ASM_EXTABLE(ftrace_test_p6nop, 2b) | |
301 | _ASM_EXTABLE(ftrace_test_nop5, 3b) | |
302 | : "=r"(faulted) : "0" (faulted)); | |
303 | ||
304 | switch (faulted) { | |
305 | case 0: | |
306 | pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n"); | |
8115f3f0 | 307 | memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); |
732f3ca7 SR |
308 | break; |
309 | case 1: | |
310 | pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n"); | |
8115f3f0 | 311 | memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); |
732f3ca7 SR |
312 | break; |
313 | case 2: | |
8b27386a | 314 | pr_info("ftrace: converting mcount calls to jmp . + 5\n"); |
8115f3f0 | 315 | memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); |
732f3ca7 SR |
316 | break; |
317 | } | |
318 | ||
319 | /* The return code is retured via data */ | |
320 | *(unsigned long *)data = 0; | |
dfa60aba | 321 | |
3d083395 SR |
322 | return 0; |
323 | } | |
caf4b323 | 324 | #endif |
e7d3737e | 325 | |
fb52607a | 326 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
e7d3737e FW |
327 | |
328 | #ifndef CONFIG_DYNAMIC_FTRACE | |
329 | ||
330 | /* | |
331 | * These functions are picked from those used on | |
332 | * this page for dynamic ftrace. They have been | |
333 | * simplified to ignore all traces in NMI context. | |
334 | */ | |
335 | static atomic_t in_nmi; | |
336 | ||
337 | void ftrace_nmi_enter(void) | |
338 | { | |
339 | atomic_inc(&in_nmi); | |
340 | } | |
341 | ||
342 | void ftrace_nmi_exit(void) | |
343 | { | |
344 | atomic_dec(&in_nmi); | |
345 | } | |
346 | #endif /* !CONFIG_DYNAMIC_FTRACE */ | |
347 | ||
348 | /* Add a function return address to the trace stack on thread info.*/ | |
349 | static int push_return_trace(unsigned long ret, unsigned long long time, | |
287b6e68 | 350 | unsigned long func, int *depth) |
e7d3737e FW |
351 | { |
352 | int index; | |
f201ae23 FW |
353 | |
354 | if (!current->ret_stack) | |
355 | return -EBUSY; | |
e7d3737e FW |
356 | |
357 | /* The return trace stack is full */ | |
f201ae23 FW |
358 | if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { |
359 | atomic_inc(¤t->trace_overrun); | |
e7d3737e | 360 | return -EBUSY; |
0231022c | 361 | } |
e7d3737e | 362 | |
f201ae23 | 363 | index = ++current->curr_ret_stack; |
e7d3737e | 364 | barrier(); |
f201ae23 FW |
365 | current->ret_stack[index].ret = ret; |
366 | current->ret_stack[index].func = func; | |
367 | current->ret_stack[index].calltime = time; | |
287b6e68 | 368 | *depth = index; |
e7d3737e FW |
369 | |
370 | return 0; | |
371 | } | |
372 | ||
373 | /* Retrieve a function return address to the trace stack on thread info.*/ | |
287b6e68 | 374 | static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) |
e7d3737e FW |
375 | { |
376 | int index; | |
377 | ||
f201ae23 FW |
378 | index = current->curr_ret_stack; |
379 | *ret = current->ret_stack[index].ret; | |
287b6e68 FW |
380 | trace->func = current->ret_stack[index].func; |
381 | trace->calltime = current->ret_stack[index].calltime; | |
382 | trace->overrun = atomic_read(¤t->trace_overrun); | |
383 | trace->depth = index; | |
f201ae23 | 384 | current->curr_ret_stack--; |
e7d3737e FW |
385 | } |
386 | ||
387 | /* | |
388 | * Send the trace to the ring-buffer. | |
389 | * @return the original return address. | |
390 | */ | |
391 | unsigned long ftrace_return_to_handler(void) | |
392 | { | |
fb52607a | 393 | struct ftrace_graph_ret trace; |
287b6e68 FW |
394 | unsigned long ret; |
395 | ||
396 | pop_return_trace(&trace, &ret); | |
e7d3737e | 397 | trace.rettime = cpu_clock(raw_smp_processor_id()); |
287b6e68 | 398 | ftrace_graph_return(&trace); |
e7d3737e | 399 | |
287b6e68 | 400 | return ret; |
e7d3737e FW |
401 | } |
402 | ||
403 | /* | |
404 | * Hook the return address and push it in the stack of return addrs | |
405 | * in current thread info. | |
406 | */ | |
407 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |
408 | { | |
409 | unsigned long old; | |
410 | unsigned long long calltime; | |
411 | int faulted; | |
287b6e68 | 412 | struct ftrace_graph_ent trace; |
e7d3737e FW |
413 | unsigned long return_hooker = (unsigned long) |
414 | &return_to_handler; | |
415 | ||
416 | /* Nmi's are currently unsupported */ | |
417 | if (atomic_read(&in_nmi)) | |
418 | return; | |
419 | ||
420 | /* | |
421 | * Protect against fault, even if it shouldn't | |
422 | * happen. This tool is too much intrusive to | |
423 | * ignore such a protection. | |
424 | */ | |
425 | asm volatile( | |
426 | "1: movl (%[parent_old]), %[old]\n" | |
427 | "2: movl %[return_hooker], (%[parent_replaced])\n" | |
428 | " movl $0, %[faulted]\n" | |
429 | ||
430 | ".section .fixup, \"ax\"\n" | |
431 | "3: movl $1, %[faulted]\n" | |
432 | ".previous\n" | |
433 | ||
434 | ".section __ex_table, \"a\"\n" | |
435 | " .long 1b, 3b\n" | |
436 | " .long 2b, 3b\n" | |
437 | ".previous\n" | |
438 | ||
439 | : [parent_replaced] "=r" (parent), [old] "=r" (old), | |
440 | [faulted] "=r" (faulted) | |
441 | : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker) | |
442 | : "memory" | |
443 | ); | |
444 | ||
445 | if (WARN_ON(faulted)) { | |
fb52607a | 446 | unregister_ftrace_graph(); |
e7d3737e FW |
447 | return; |
448 | } | |
449 | ||
450 | if (WARN_ON(!__kernel_text_address(old))) { | |
fb52607a | 451 | unregister_ftrace_graph(); |
e7d3737e FW |
452 | *parent = old; |
453 | return; | |
454 | } | |
455 | ||
456 | calltime = cpu_clock(raw_smp_processor_id()); | |
457 | ||
287b6e68 FW |
458 | if (push_return_trace(old, calltime, |
459 | self_addr, &trace.depth) == -EBUSY) { | |
e7d3737e | 460 | *parent = old; |
287b6e68 FW |
461 | return; |
462 | } | |
463 | ||
464 | trace.func = self_addr; | |
465 | ftrace_graph_entry(&trace); | |
466 | ||
e7d3737e FW |
467 | } |
468 | ||
fb52607a | 469 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |