/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#include <asm/nops.h>


static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};

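/*
 * Editorial note: the union gives two views of the same 5 bytes
 * (MCOUNT_INSN_SIZE is 5 on x86): a raw byte array, and a near call
 * broken into its opcode (0xe8) and its 32-bit signed displacement.
 * The packed attribute keeps the compiler from inserting padding
 * between the opcode byte and the displacement, so the struct view
 * matches the instruction encoding byte for byte.
 */
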
static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

unsigned char *ftrace_nop_replace(void)
{
	return ftrace_nop;
}

unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8		= 0xe8;
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}

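/*
 * Editorial example: the displacement is taken relative to the first
 * byte *after* the call, which is what the CPU expects. For a call
 * site at ip = 0xc0100000 targeting addr = 0xc0200000:
 *
 *	offset = addr - (ip + MCOUNT_INSN_SIZE)
 *	       = 0xc0200000 - 0xc0100005 = 0x000ffffb
 *
 * so the emitted bytes are e8 fb ff 0f 00 (little-endian offset),
 * i.e. "call 0xc0200000".
 */
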
/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: an IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Set a flag that says we are modifying code.
 * 3) Wait for any running NMIs to finish.
 * 4) Write the code.
 * 5) Clear the flag.
 * 6) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if more than one CPU writes to
 * the code location, as long as they all write the same content.
 * Also, if a CPU is executing code it is OK to write to that code
 * location if the contents being written are the same as what exists.
 */

static atomic_t in_nmi;
static int mod_code_status;
static int mod_code_write;
static void *mod_code_ip;
static void *mod_code_newcode;

static void ftrace_mod_code(void)
{
	/*
	 * Yes, more than one CPU can be writing to mod_code_status
	 * (and the code itself) at the same time.
	 * But if one were to fail, then they all should, and if one
	 * were to succeed, then they all should.
	 */
	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
					     MCOUNT_INSN_SIZE);
}

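/*
 * Editorial note: probe_kernel_write() performs the copy with page
 * faults disabled and returns -EFAULT rather than oopsing if the
 * target is unmapped, which is what lets mod_code_status report
 * failure safely, even when the write is done from NMI context.
 */
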
void ftrace_nmi_enter(void)
{
	atomic_inc(&in_nmi);
	/* Must have in_nmi seen before reading write flag */
	smp_mb();
	if (mod_code_write)
		ftrace_mod_code();
}

void ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing in_nmi */
	smp_wmb();
	atomic_dec(&in_nmi);
}

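/*
 * Editorial note: as the comment block above describes, the NMI
 * path calls ftrace_nmi_enter() before anything else, so an NMI
 * that lands mid-update performs the pending write itself. Since
 * every writer stores the same bytes to the same location, the
 * duplicate writes are harmless.
 */
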
static void wait_for_nmi(void)
{
	while (atomic_read(&in_nmi))
		cpu_relax();
}

static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers need to be visible before we let NMIs write them */
	smp_wmb();

	mod_code_write = 1;

	/* Make sure write bit is visible before we wait on NMIs */
	smp_mb();

	wait_for_nmi();

	/* Make sure all running NMIs have finished before we write the code */
	smp_mb();

	ftrace_mod_code();

	/* Make sure the write happens before clearing the bit */
	smp_wmb();

	mod_code_write = 0;

	/* make sure NMIs see the cleared bit */
	smp_mb();

	wait_for_nmi();

	return mod_code_status;
}

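/*
 * Editorial note: the barriers above pair with ftrace_nmi_enter():
 * the writer publishes mod_code_ip/mod_code_newcode (smp_wmb) before
 * setting mod_code_write, while the NMI side separates its
 * atomic_inc(&in_nmi) from its read of mod_code_write with a full
 * smp_mb(). Either the NMI sees the flag and does the (idempotent)
 * write itself, or wait_for_nmi() holds the writer off until the
 * NMI has finished.
 */
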
int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can disappear and
	 * change; we need to protect against faulting as well as
	 * code changing. We do this by using the probe_kernel_*
	 * functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	sync_core();

	return 0;
}

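/*
 * Editorial sketch: callers pair the two replace helpers above. To
 * enable a single mcount call site at ip, the generic ftrace code
 * does roughly (FTRACE_ADDR being the tracer trampoline address):
 *
 *	old = ftrace_nop_replace();
 *	new = ftrace_call_replace(ip, FTRACE_ADDR);
 *	ret = ftrace_modify_code(ip, old, new);
 *
 * and swaps old/new to disable the site again. -EFAULT, -EINVAL and
 * -EPERM tell the caller whether the site vanished, held unexpected
 * bytes, or could not be written.
 */
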
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

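/*
 * Editorial note: ftrace_call is a label on the call instruction
 * inside the ftrace_caller trampoline in the arch entry assembly;
 * patching that one site redirects every traced function to the new
 * tracer with a single 5-byte update.
 */
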
int __init ftrace_dyn_arch_init(void *data)
{
	extern const unsigned char ftrace_test_p6nop[];
	extern const unsigned char ftrace_test_nop5[];
	extern const unsigned char ftrace_test_jmp[];
	int faulted = 0;

	/*
	 * There is no good nop for all x86 archs.
	 * We will default to using the P6_NOP5, but first we
	 * will test to make sure that the nop will actually
	 * work on this CPU. If it faults, we will then
	 * go to a less efficient 5 byte nop. If that fails
	 * we then just use a jmp as our nop. This isn't the most
	 * efficient nop, but we cannot use a multi-part nop
	 * since we would then risk being preempted in the middle
	 * of that nop, and if we enabled tracing then, it might
	 * cause a system crash.
	 *
	 * TODO: check the cpuid to determine the best nop.
	 */
	asm volatile (
		"ftrace_test_jmp:"
		"jmp ftrace_test_p6nop\n"
		"nop\n"
		"nop\n"
		"nop\n"  /* 2 byte jmp + 3 bytes */
		"ftrace_test_p6nop:"
		P6_NOP5
		"jmp 1f\n"
		"ftrace_test_nop5:"
		".byte 0x66,0x66,0x66,0x66,0x90\n"
		"1:"
		".section .fixup, \"ax\"\n"
		"2:	movl $1, %0\n"
		"	jmp ftrace_test_nop5\n"
		"3:	movl $2, %0\n"
		"	jmp 1b\n"
		".previous\n"
		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
		_ASM_EXTABLE(ftrace_test_nop5, 3b)
		: "=r"(faulted) : "0" (faulted));

	switch (faulted) {
	case 0:
		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
		break;
	case 1:
		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
		break;
	case 2:
		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
		break;
	}

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
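
/*
 * Editorial note: the three candidate nops probed above are, byte
 * for byte:
 *
 *	ftrace_test_p6nop:	0f 1f 44 00 00	(5-byte P6 nop)
 *	ftrace_test_nop5:	66 66 66 66 90	(nop with operand-size prefixes)
 *	ftrace_test_jmp:	eb 03 90 90 90	(2-byte jmp over 3 nops)
 *
 * Each variant is preemption-safe: the whole 5 bytes are either a
 * single instruction or a jmp that skips the remainder, so a task
 * can never be resumed in the middle of a half-patched sequence.
 */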