/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

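/*
 * Consume every event from one CPU's ring buffer and check that
 * each entry is of a type the selftests can legitimately produce.
 */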
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
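 * Returns 0 if every entry is valid, -1 otherwise; the number of
 * entries found in the buffer is reported through *count.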
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

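/*
 * RECURSION_SAFE tells ftrace that a callback handles its own
 * recursion protection; GLOBAL places test_global on the ops list
 * shared with the function tracer itself.
 */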
static struct ftrace_ops test_probe1 = {
	.func	= trace_selftest_test_probe1_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func	= trace_selftest_test_probe2_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func	= trace_selftest_test_probe3_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_global = {
	.func	= trace_selftest_test_global_func,
	.flags	= FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' names: the '*' glob also matches the dotted symbol */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
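	/*
	 * The last ftrace_set_filter() argument is "reset": probe3 set
	 * function 1 with reset on, then appended function 2 with reset
	 * off, so its filter now holds both functions.
	 */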

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

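	/*
	 * Only function 1 has run: probe1 and probe3 fire once each,
	 * probe2 must stay at zero, and the global ops sees every
	 * traced function, so it merely needs to be nonzero.
	 */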
	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

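	/*
	 * dyn_ops carries no filter, so it traces every function;
	 * its counter only needs to be nonzero below.
	 */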
	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	trace_selftest_recursion_cnt++;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion. By calling
	 * this function again, we should recurse back into this function
	 * and count again. But this only happens if the arch supports
	 * all of ftrace features and nothing else is using the function
	 * tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

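/*
 * The first probe leaves recursion protection to the ftrace core;
 * the second declares itself recursion safe and recurses once on
 * purpose, so the expected counts below differ.
 */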
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	char *func_name;
	int len;
	int ret;
	int cnt;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 0;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	/*
	 * If the arch supports all ftrace features, and no other
	 * ops was on the list, we should be fine.
	 */
	if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC)
		cnt = 2;	/* Should have recursed */
	else
		cnt = 1;

	ret = -1;
	if (trace_selftest_recursion_cnt != cnt) {
		pr_cont("*callback not called expected %d times (%d)* ",
			cnt, trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func	= trace_selftest_test_regs_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

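/*
 * SAVE_REGS asks ftrace to hand the callback a full pt_regs; on an
 * arch without support, registering such an ops must fail unless
 * SAVE_REGS_IF_SUPPORTED is also set.
 */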
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer test, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning caused by a buffer-flip
	 * failure: tracing_stop() disables both the tr and max buffers,
	 * so a max-irqs-off latency hitting in parallel could not swap
	 * them.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
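	/*
	 * The latency tracers snapshot the max-latency window into
	 * max_tr, so validate both the live buffer and the snapshot.
	 */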
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning caused by a buffer-flip
	 * failure: tracing_stop() disables both the tr and max buffers,
	 * so a max-preempt-off latency hitting in parallel could not
	 * swap them.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning caused by a buffer-flip
	 * failure: tracing_stop() disables both the tr and max buffers,
	 * so a max-irqs/preempt-off latency hitting in parallel could
	 * not swap them.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we have set our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	complete(x);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

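/*
 * The wakeup tracer measures how long the highest-priority woken
 * task waits before it actually gets the CPU; waking the RT kthread
 * above from this lower-priority context should record a sample.
 */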
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the RT thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&isrt);

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&isrt);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */