/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
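
/*
 * As the comment above notes, this file is #include'd into trace.c
 * (in mainline, under CONFIG_FTRACE_STARTUP_TEST) rather than built
 * standalone; that is why it can reference symbols local to trace.c,
 * such as tracing_disabled and ftrace_max_lock, without declarations.
 */
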
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
	case TRACE_HW_BRANCHES:
	case TRACE_KSYM:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries.
		 * If we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
			       entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

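/*
 * The selftests below all follow the same basic pattern; sketched here
 * for orientation (condensed from the tests themselves, not a
 * registered test):
 *
 *	ret = tracer_init(trace, tr);		// start the tracer
 *	msleep(100);				// let it record something
 *	tracing_stop();
 *	ret = trace_test_buffer(tr, &count);	// validate the entries
 *	trace->reset(tr);
 *	tracing_start();
 */
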
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* the function is passed in by parameter to keep gcc from optimizing it */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
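
/*
 * Note: DYN_FTRACE_TEST_NAME is expected to name a function defined in
 * a separate compilation unit (trace_selftest_dynamic.c in mainline) so
 * the compiler cannot inline it and its mcount call site survives for
 * ftrace_set_filter() above to match. The #else stub uses a GCC
 * statement expression so callers still get an int result when
 * CONFIG_DYNAMIC_FTRACE is off.
 */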

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy; we only want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false);
		return 0;
	}

	return trace_graph_entry(trace);
}

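/*
 * The registration below differs from a normal graph-tracer init only
 * in that the entry callback is swapped for the watchdog wrapper above;
 * the return callback stays trace_graph_return.
 */
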
/*
 * Pretty much the same as the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we are running at the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

#ifdef CONFIG_HW_BRANCH_TRACER
int
trace_selftest_startup_hw_branches(struct tracer *trace,
				   struct trace_array *tr)
{
	struct trace_iterator *iter;
	struct tracer tracer;
	unsigned long count;
	int ret;

	if (!trace->open) {
		printk(KERN_CONT "missing open function...");
		return -1;
	}

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/*
	 * The hw-branch tracer needs to collect the trace from the various
	 * cpu trace buffers - before tracing is stopped.
	 */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	memcpy(&tracer, trace, sizeof(tracer));

	iter->trace = &tracer;
	iter->tr = tr;
	iter->pos = -1;
	mutex_init(&iter->mutex);

	trace->open(iter);

	mutex_destroy(&iter->mutex);
	kfree(iter);

	tracing_stop();

	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT "no entries found..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */

#ifdef CONFIG_KSYM_TRACER
static int ksym_selftest_dummy;

int
trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	ksym_selftest_dummy = 0;
	/* Register the read-write tracing request */
	ret = process_new_ksym_entry("ksym_selftest_dummy",
				     HW_BREAKPOINT_R | HW_BREAKPOINT_W,
				     (unsigned long)(&ksym_selftest_dummy));

	if (ret < 0) {
		printk(KERN_CONT "ksym_trace read-write startup test failed\n");
		goto ret_path;
	}
	/*
	 * Perform a read and a write operation on the dummy variable
	 * to trigger the tracer.
	 */
	if (ksym_selftest_dummy == 0)
		ksym_selftest_dummy++;

	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/*
	 * One read and one write operation were performed on the dummy
	 * variable, triggering two entries in the trace buffer.
	 */
	if (!ret && count != 2) {
		printk(KERN_CONT "Ksym tracer startup test failed");
		ret = -1;
	}

 ret_path:
	return ret;
}
#endif /* CONFIG_KSYM_TRACER */