/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

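/*
 * Entry types that the selftests can legitimately produce.  Anything
 * else found in the ring buffer is treated as corruption.
 */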
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

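/*
 * Consume every event queued on one CPU and check that each one is a
 * valid entry.  Bails out and disables tracing if the buffer looks
 * corrupted or yields more events than it can possibly hold.
 */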
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

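/*
 * Each probe below just counts how many times it is called.  The
 * trace_selftest_ops() test installs them with different filters and
 * then checks the counters to verify that each ftrace_ops traced only
 * the functions it was filtered on.
 */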
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
};

static struct ftrace_ops test_global = {
	.func			= trace_selftest_test_global_func,
	.flags			= FTRACE_OPS_FL_GLOBAL,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

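	/*
	 * Function 1 was called once: probe1 and probe3 filter on it and
	 * should each have fired exactly once, probe2 must not have fired,
	 * and the global ops traces everything so its count is just nonzero.
	 */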
	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

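	/*
	 * Now function 2 has been called once as well: probe2 and probe3
	 * both fire on it, so probe3's total rises to two.
	 */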
	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

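	/*
	 * With the dynamic ops registered, calling function 1 again should
	 * bump probe1, probe3, the global count, and the dynamic count.
	 */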
	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST 100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
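/*
 * The latency selftests below all follow the same pattern: zero
 * tracing_max_latency, create a short latency window (here, interrupts
 * off for ~100us), then check that the max buffer captured it.
 */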
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

602 | ||
603 | #ifdef CONFIG_PREEMPT_TRACER | |
604 | int | |
605 | trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr) | |
606 | { | |
607 | unsigned long save_max = tracing_max_latency; | |
608 | unsigned long count; | |
609 | int ret; | |
610 | ||
769c48eb SR |
611 | /* |
612 | * Now that the big kernel lock is no longer preemptable, | |
613 | * and this is called with the BKL held, it will always | |
614 | * fail. If preemption is already disabled, simply | |
615 | * pass the test. When the BKL is removed, or becomes | |
616 | * preemptible again, we will once again test this, | |
617 | * so keep it in. | |
618 | */ | |
619 | if (preempt_count()) { | |
620 | printk(KERN_CONT "can not test ... force "); | |
621 | return 0; | |
622 | } | |
623 | ||
60a11774 | 624 | /* start the tracing */ |
b6f11df2 | 625 | ret = tracer_init(trace, tr); |
1c80025a FW |
626 | if (ret) { |
627 | warn_failed_init_tracer(trace, ret); | |
628 | return ret; | |
629 | } | |
630 | ||
60a11774 SR |
631 | /* reset the max latency */ |
632 | tracing_max_latency = 0; | |
633 | /* disable preemption for a bit */ | |
634 | preempt_disable(); | |
635 | udelay(100); | |
636 | preempt_enable(); | |
49036200 FW |
637 | |
638 | /* | |
639 | * Stop the tracer to avoid a warning subsequent | |
640 | * to buffer flipping failure because tracing_stop() | |
641 | * disables the tr and max buffers, making flipping impossible | |
642 | * in case of parallels max preempt off latencies. | |
643 | */ | |
644 | trace->stop(tr); | |
60a11774 | 645 | /* stop the tracing. */ |
bbf5b1a0 | 646 | tracing_stop(); |
60a11774 SR |
647 | /* check both trace buffers */ |
648 | ret = trace_test_buffer(tr, NULL); | |
649 | if (!ret) | |
650 | ret = trace_test_buffer(&max_tr, &count); | |
651 | trace->reset(tr); | |
bbf5b1a0 | 652 | tracing_start(); |
60a11774 SR |
653 | |
654 | if (!ret && !count) { | |
655 | printk(KERN_CONT ".. no entries found .."); | |
656 | ret = -1; | |
657 | } | |
658 | ||
659 | tracing_max_latency = save_max; | |
660 | ||
661 | return ret; | |
662 | } | |
663 | #endif /* CONFIG_PREEMPT_TRACER */ | |
664 | ||
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

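/*
 * Verify the wakeup tracer: create an RT thread, let it go to sleep,
 * wake it up, and check that the max buffer recorded the wakeup latency.
 */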
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case, is that we disable tracing. Honestly, if
	 * this race does happen something is horribly wrong with
	 * the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */