arch/x86/kernel/ftrace.c
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#include <asm/nops.h>


static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};
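
/*
 * On x86 a near call is encoded as the opcode 0xe8 followed by a
 * signed 32-bit displacement relative to the next instruction,
 * MCOUNT_INSN_SIZE (5) bytes in all. The packed union above lets
 * those 5 bytes be built as an opcode/offset pair and read back
 * as a raw byte array.
 */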

static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

unsigned char *ftrace_nop_replace(void)
{
	return ftrace_nop;
}

unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8 = 0xe8;
	calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}
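
/*
 * Worked example with made-up addresses: patching a call site at
 * ip = 0xc0100000 to call addr = 0xc0200000. The displacement is
 * taken from the end of the 5-byte call instruction, so
 *
 *	offset = 0xc0200000 - (0xc0100000 + 5) = 0x000ffffb
 *
 * and ftrace_call_replace() hands back the bytes e8 fb ff 0f 00
 * (the offset is stored little-endian).
 */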

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU,
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: an IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Set a flag that says we are modifying code.
 * 3) Wait for any running NMIs to finish.
 * 4) Write the code.
 * 5) Clear the flag.
 * 6) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write,
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code,
 * it is OK to write to that code location if the contents being
 * written are the same as what exists.
 */

static atomic_t in_nmi;
static int mod_code_status;
static int mod_code_write;
static void *mod_code_ip;
static void *mod_code_newcode;

static int nmi_wait_count;
static atomic_t nmi_update_count;

int ftrace_arch_read_dyn_info(char *buf, int size)
{
	int r;

	r = snprintf(buf, size, "%u %u",
		     nmi_wait_count,
		     atomic_read(&nmi_update_count));
	return r;
}

static void ftrace_mod_code(void)
{
	/*
	 * Yes, more than one CPU can be writing to mod_code_status
	 * (and to the code itself).
	 * But if one were to fail, then they all should, and if one
	 * were to succeed, then they all should.
	 */
	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
					     MCOUNT_INSN_SIZE);
}

void ftrace_nmi_enter(void)
{
	atomic_inc(&in_nmi);
	/* Must have in_nmi seen before reading write flag */
	smp_mb();
	if (mod_code_write) {
		ftrace_mod_code();
		atomic_inc(&nmi_update_count);
	}
}

void ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing in_nmi */
	smp_wmb();
	atomic_dec(&in_nmi);
}

static void wait_for_nmi(void)
{
	int waited = 0;

	while (atomic_read(&in_nmi)) {
		waited = 1;
		cpu_relax();
	}

	if (waited)
		nmi_wait_count++;
}
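
/*
 * Note on ordering: the smp_mb() in ftrace_nmi_enter() pairs with the
 * smp_mb() in do_ftrace_mod_code() below. An NMI either sees
 * mod_code_write already set and performs the write itself, or its
 * in_nmi increment is seen by wait_for_nmi(), which spins until that
 * NMI has exited before the code is touched.
 */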

static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers need to be visible before we let NMIs write them */
	smp_wmb();

	mod_code_write = 1;

	/* Make sure write bit is visible before we wait on NMIs */
	smp_mb();

	wait_for_nmi();

	/* Make sure all running NMIs have finished before we write the code */
	smp_mb();

	ftrace_mod_code();

	/* Make sure the write happens before clearing the bit */
	smp_wmb();

	mod_code_write = 0;

	/* Make sure NMIs see the cleared bit */
	smp_mb();

	wait_for_nmi();

	return mod_code_status;
}

int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can disappear and
	 * change; we need to protect against faulting as well as code
	 * changing. We do this by using the probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* Read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* Replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	sync_core();

	return 0;
}
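
/*
 * Illustrative usage sketch, not part of this file: the generic
 * ftrace core drives the helpers above roughly like this when it
 * disables a traced call site, swapping the call for the nop:
 *
 *	unsigned char *call = ftrace_call_replace(ip, mcount_addr);
 *	unsigned char *nop = ftrace_nop_replace();
 *	int ret = ftrace_modify_code(ip, call, nop);
 *
 * Here ip and mcount_addr stand in for the call-site address and the
 * mcount trampoline address that the core tracks per record.
 */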

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
	extern const unsigned char ftrace_test_p6nop[];
	extern const unsigned char ftrace_test_nop5[];
	extern const unsigned char ftrace_test_jmp[];
	int faulted = 0;

	/*
	 * There is no good nop for all x86 archs.
	 * We will default to using the P6_NOP5, but first we
	 * will test to make sure that the nop will actually
	 * work on this CPU. If it faults, we will then
	 * fall back to a less efficient 5-byte nop. If that fails
	 * we then just use a jmp as our nop. This isn't the most
	 * efficient nop, but we cannot use a multi-part nop, since
	 * we would then risk being preempted in the middle of that
	 * nop, and if we enabled tracing at that point, it might
	 * cause a system crash.
	 *
	 * TODO: check the cpuid to determine the best nop.
	 */
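	/*
	 * For reference: P6_NOP5 (0f 1f 44 00 00) is the single
	 * instruction nopl 0x0(%eax,%eax,1), and 66 66 66 66 90 is a
	 * nop with operand-size prefixes; each is one instruction,
	 * unlike a run of five 1-byte nops.
	 */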
	asm volatile (
		"ftrace_test_jmp:"
		"jmp ftrace_test_p6nop\n"
		"nop\n"
		"nop\n"
		"nop\n"  /* 2 byte jmp + 3 bytes */
		"ftrace_test_p6nop:"
		P6_NOP5
		"jmp 1f\n"
		"ftrace_test_nop5:"
		".byte 0x66,0x66,0x66,0x66,0x90\n"
		"1:"
		".section .fixup, \"ax\"\n"
		"2:	movl $1, %0\n"
		"	jmp ftrace_test_nop5\n"
		"3:	movl $2, %0\n"
		"	jmp 1b\n"
		".previous\n"
		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
		_ASM_EXTABLE(ftrace_test_nop5, 3b)
		: "=r"(faulted) : "0" (faulted));

	switch (faulted) {
	case 0:
		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
		break;
	case 1:
		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
		break;
	case 2:
		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
		break;
	}

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}