/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/time.h>

/* run time and sleep time in seconds */
#define RUN_TIME	10
#define SLEEP_TIME	10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static struct completion read_start;
static struct completion read_done;

static struct ring_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static int read_events;

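/*
 * The code below references a struct rb_page, an enum event_status and a
 * kill_test error flag that are not part of this extract. The following is a
 * minimal sketch of what those definitions would need to look like for the
 * file to build; the exact data[] size and the KILL_TEST() helper are
 * assumptions, not copied from the original source.
 */
struct rb_page {
	u64		ts;
	local_t		commit;
	char		data[4080];	/* assumed: PAGE_SIZE minus the header above */
};

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};

/* set to stop the test when a corrupted event is seen */
static int kill_test;

#define KILL_TEST()				\
	do {					\
		if (!kill_test) {		\
			kill_test = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)
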
static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		KILL_TEST();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}

static enum event_status read_page(int cpu)
{
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	void *bpage;
	int *entry;
	int ret;
	int inc;
	int i;

	bpage = ring_buffer_alloc_read_page(buffer);
	if (!bpage)
		return EVENT_DROPPED;

	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
	if (ret >= 0) {
		rpage = bpage;
		commit = local_read(&rpage->commit);
		for (i = 0; i < commit && !kill_test; i += inc) {

			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
				KILL_TEST();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* We don't expect any padding */
				KILL_TEST();
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				/* time extends are a fixed 8 bytes */
				inc = 8;
				break;
			case 0:
				/* a zero type_len means the length lives in array[0] */
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				if (!event->array[0]) {
					KILL_TEST();
					break;
				}
				inc = event->array[0];
				break;
			default:
				/* otherwise the length is encoded in type_len itself */
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (kill_test)
				break;

			if (inc <= 0) {
				KILL_TEST();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}

static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	while (!reader_finish && !kill_test) {
		int found;

		do {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (kill_test)
					break;
				if (stat == EVENT_FOUND)
					found = 1;
			}
		} while (found && !kill_test);

		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	reader_finish = 0;
	complete(&read_done);
}

/*
 * If we are a non-preempt kernel, the 10 second run will
 * stop everything while it runs. Instead, we will call cond_resched
 * and also add any time that was lost by a reschedule.
 */
#ifdef CONFIG_PREEMPT
static void sched_if_needed(struct timeval *start_tv, struct timeval *end_tv)
{
}
#else
static void sched_if_needed(struct timeval *start_tv, struct timeval *end_tv)
{
	struct timeval tv;

	cond_resched();
	do_gettimeofday(&tv);
	/* borrow a second if the microseconds would go negative */
	if (tv.tv_usec < end_tv->tv_usec) {
		tv.tv_usec += 1000000;
		tv.tv_sec--;
	}
	/* push the start time forward by however long the reschedule took */
	start_tv->tv_sec += tv.tv_sec - end_tv->tv_sec;
	start_tv->tv_usec += tv.tv_usec - end_tv->tv_usec;
	if (start_tv->tv_usec > 1000000) {
		start_tv->tv_usec -= 1000000;
		start_tv->tv_sec++;
	}
}
#endif

static void ring_buffer_producer(void)
{
	struct timeval start_tv;
	struct timeval end_tv;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;
	unsigned long hit = 0;
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	pr_info("Starting ring buffer hammer\n");
	do_gettimeofday(&start_tv);
	do {
		struct ring_buffer_event *event;
		int *entry;

		event = ring_buffer_lock_reserve(buffer, 10);
		if (!event) {
			missed++;
		} else {
			hit++;
			entry = ring_buffer_event_data(event);
			*entry = smp_processor_id();
			ring_buffer_unlock_commit(buffer, event);
		}
		do_gettimeofday(&end_tv);

		if (consumer && !(++cnt % wakeup_interval))
			wake_up_process(consumer);

		sched_if_needed(&start_tv, &end_tv);

	} while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
	pr_info("End ring buffer hammer\n");

	if (consumer) {
		/* Init both completions here to avoid races */
		init_completion(&read_start);
		init_completion(&read_done);
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
		/* finish var visible before waking up the consumer */
		smp_wmb();
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}

	time = end_tv.tv_sec - start_tv.tv_sec;
	time *= 1000000;
	time += (long long)((long)end_tv.tv_usec - (long)start_tv.tv_usec);

	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);

280 pr_info("Time: %lld (usecs)\n", time
);
281 pr_info("Overruns: %lld\n", overruns
);
283 pr_info("Read: (reader disabled)\n");
285 pr_info("Read: %ld (by %s)\n", read
,
286 read_events
? "events" : "pages");
287 pr_info("Entries: %lld\n", entries
);
288 pr_info("Total: %lld\n", entries
+ overruns
+ read
);
289 pr_info("Missed: %ld\n", missed
);
290 pr_info("Hit: %ld\n", hit
);
296 pr_info("TIME IS ZERO??\n");
298 pr_info("Entries per millisec: %ld\n", hit
);
302 pr_info("%ld ns per entry\n", avg
);
static void wait_to_die(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}

static int ring_buffer_consumer_thread(void *arg)
{
	while (!kthread_should_stop() && !kill_test) {
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop() || kill_test)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	__set_current_state(TASK_RUNNING);

	if (kill_test)
		wait_to_die();

	return 0;
}

static int ring_buffer_producer_thread(void *arg)
{
	init_completion(&read_start);

	while (!kthread_should_stop() && !kill_test) {
		ring_buffer_reset(buffer);

		if (consumer) {
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();

		pr_info("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ * SLEEP_TIME);
		__set_current_state(TASK_RUNNING);
	}

	if (kill_test)
		wait_to_die();

	return 0;
}

static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);

	if (IS_ERR(producer))
		goto out_kill;

	return 0;

 out_kill:
	if (consumer)
		kthread_stop(consumer);

 out_fail:
	ring_buffer_free(buffer);
	return ret;
}

static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");

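/*
 * Usage sketch (not part of the original file; the module file name is an
 * assumption based on this source name): build this file as a module and
 * load it, e.g.
 *
 *   insmod ring_buffer_benchmark.ko
 *   insmod ring_buffer_benchmark.ko disable_reader=1    # producer only
 *
 * Every RUN_TIME + SLEEP_TIME seconds a new measurement cycle runs and the
 * results (Time, Overruns, Read, Entries, Total, Missed, Hit, entries per
 * millisec, ns per entry) are written to the kernel log; read them with
 * dmesg. Unload with rmmod when done.
 */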