/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/sched.h>	/* used for sched_clock() (for now) */
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/fs.h>

#include "trace.h"
/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer:
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per-cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */
/*
 * Global flag to disable all recording to ring buffers.
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0      : ring buffers are off
 *   1      0      : ring buffers are on
 *   X      1      : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}
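
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * can bracket a read-out with the global switch so no new events land
 * while it inspects the buffers. my_dump_state() is hypothetical.
 */
#if 0
static void my_snapshot(void)
{
	tracing_off();		/* freeze all ring buffers */
	my_dump_state();	/* read them out undisturbed */
	tracing_on();		/* resume recording */
}
#endif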
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

u64 ring_buffer_time_stamp(int cpu)
{
	u64 time;

	preempt_disable_notrace();
	/* shift to debug/test normalization and TIME_EXTENTS */
	time = sched_clock() << DEBUG_SHIFT;
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
#define RB_ALIGNMENT_SHIFT	2
#define RB_ALIGNMENT		(1 << RB_ALIGNMENT_SHIFT)
#define RB_MAX_SMALL_DATA	28

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};
/* inline for ring buffer fast paths */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		/* undefined */
		return -1;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		if (event->len)
			length = event->len << RB_ALIGNMENT_SHIFT;
		else
			length = event->array[0];
		return length + RB_EVNT_HDR_SIZE;
	default:
		BUG();
	}
	/* not hit */
	return 0;
}
/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	return rb_event_length(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);
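
/*
 * Worked example (illustrative, not part of the original file): how a
 * 12-byte payload round-trips through the length encoding above. The
 * writer stores small lengths in ->len in RB_ALIGNMENT units; the
 * reader recovers the byte count by shifting back.
 */
#if 0
static void rb_length_example(struct ring_buffer_event *event)
{
	/* writer side: 12 bytes of data (header excluded) fits in ->len */
	event->type = RINGBUF_TYPE_DATA;
	event->len  = (12 + (RB_ALIGNMENT - 1)) >> RB_ALIGNMENT_SHIFT; /* == 3 */

	/* reader side: rb_event_length() computes 3 << 2 == 12 bytes,
	 * plus RB_EVNT_HDR_SIZE for the total event size (16 bytes). */
	BUG_ON(rb_event_length(event) != 12 + RB_EVNT_HDR_SIZE);
}
#endif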
/* inline for ring buffer fast paths */
static inline void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type != RINGBUF_TYPE_DATA);
	/* If length is in len field, then array[0] has the data */
	if (event->len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

struct buffer_page {
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	struct list_head list;		/* list of free pages */
	struct buffer_data_page *page;	/* Actual data page */
};

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static inline void free_buffer_page(struct buffer_page *bpage)
{
	if (bpage->page)
		free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))
/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			reader_lock; /* serialize readers */
	raw_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			overrun;
	unsigned long			entries;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	cpumask_var_t			cpumask;
	atomic_t			record_disabled;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;
};
struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	u64				read_stamp;
};
/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(buffer, cond)				\
	({							\
		int _____ret = unlikely(cond);			\
		if (_____ret) {					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
		_____ret;					\
	})
/**
 * check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
	}

	return 0;
}
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;
		list_add(&bpage->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		bpage->page = (void *)addr;
		rb_init_page(bpage->page);
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&cpu_buffer->pages);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	cpu_buffer->reader_page = bpage;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	bpage->page = (void *)addr;
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}
static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	list_del_init(&cpu_buffer->reader_page->list);
	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(bpage, tmp, head, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);
/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	cpumask_copy(buffer->cpumask, cpu_possible_mask);
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc);
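
/*
 * Usage sketch (illustrative, not part of the original file): allocate
 * a 64KB-per-cpu overwriting buffer and release it again.
 */
#if 0
static int my_buffer_setup(void)
{
	struct ring_buffer *rb;

	rb = ring_buffer_alloc(65536, RB_FL_OVERWRITE);
	if (!rb)
		return -ENOMEM;
	/* ... record and read events ... */
	ring_buffer_free(rb);
	return 0;
}
#endif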
/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);
static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
			return;
		p = cpu_buffer->pages.next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
		return;

	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}
static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
			return;
		p = pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		list_add_tail(&bpage->list, &cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}
/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *bpage, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
			mutex_unlock(&buffer->mutex);
			return -1;
		}

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * link list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
		mutex_unlock(&buffer->mutex);
		return -1;
	}

	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						   cache_line_size()),
					     GFP_KERNEL, cpu_to_node(cpu));
			if (!bpage)
				goto free_pages;
			list_add(&bpage->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			bpage->page = (void *)addr;
			rb_init_page(bpage->page);
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	if (RB_WARN_ON(buffer, !list_empty(&pages))) {
		mutex_unlock(&buffer->mutex);
		return -1;
	}

 out:
	buffer->pages = nr_pages;
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	mutex_unlock(&buffer->mutex);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type == RINGBUF_TYPE_PADDING;
}

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}
static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->head_page,
			       cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->head_page);
}
/*
 * When the tail hits the head and the buffer is in overwrite mode,
 * the head jumps to the next page and all content on the previous
 * page is discarded. But before doing so, we update the overrun
 * variable of the buffer.
 */
static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	unsigned long head;

	for (head = 0; head < rb_head_size(cpu_buffer);
	     head += rb_event_length(event)) {

		event = __rb_page_index(cpu_buffer->head_page, head);
		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
			return;
		/* Only count data entries */
		if (event->type != RINGBUF_TYPE_DATA)
			continue;
		cpu_buffer->overrun++;
		cpu_buffer->entries--;
	}
}
static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = (*bpage)->list.next;

	if (p == &cpu_buffer->pages)
		p = p->next;

	*bpage = list_entry(p, struct buffer_page, list);
}
static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}

static inline int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
	     struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}
static void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
		    struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	while (cpu_buffer->commit_page->page != (void *)addr) {
		if (RB_WARN_ON(cpu_buffer,
			       cpu_buffer->commit_page == cpu_buffer->tail_page))
			return;
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
	}

	/* Now set the commit to the event's index */
	local_set(&cpu_buffer->commit_page->page->commit, index);
}
static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}
static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}
static inline void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = cpu_buffer->head_page;
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}
/**
 * ring_buffer_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static inline void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
		break;

	case RINGBUF_TYPE_TIME_EXTEND:
		event->len =
			(RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_TIME_STAMP:
		event->len =
			(RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_DATA:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA) {
			event->len = 0;
			event->array[0] = length;
		} else
			event->len =
				(length + (RB_ALIGNMENT-1))
				>> RB_ALIGNMENT_SHIFT;
		break;
	default:
		BUG();
	}
}
static inline unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}
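
/*
 * Worked example (illustrative, not part of the original file):
 * rb_calculate_event_length(12) gives ALIGN(12 + 4, 4) == 16 bytes,
 * while a 40-byte payload (> RB_MAX_SMALL_DATA) also reserves room for
 * the length word in array[0]: ALIGN(40 + 4 + 4, 4) == 48 bytes.
 */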
static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
	unsigned long tail, write;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

	commit_page = cpu_buffer->commit_page;
	/* we just need to protect against interrupts */
	barrier();
	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE) {
		struct buffer_page *next_page = tail_page;

		local_irq_save(flags);
		__raw_spin_lock(&cpu_buffer->lock);

		rb_inc_page(cpu_buffer, &next_page);

		head_page = cpu_buffer->head_page;
		reader_page = cpu_buffer->reader_page;

		/* we grabbed the lock before incrementing */
		if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
			goto out_unlock;

		/*
		 * If for some reason, we had an interrupt storm that made
		 * it all the way around the buffer, bail, and warn
		 * about it.
		 */
		if (unlikely(next_page == commit_page)) {
			WARN_ON_ONCE(1);
			goto out_unlock;
		}

		if (next_page == head_page) {
			if (!(buffer->flags & RB_FL_OVERWRITE)) {
				/* reset write */
				if (tail <= BUF_PAGE_SIZE)
					local_set(&tail_page->write, tail);
				goto out_unlock;
			}

			/* tail_page has not moved yet? */
			if (tail_page == cpu_buffer->tail_page) {
				/* count overflows */
				rb_update_overflow(cpu_buffer);

				rb_inc_page(cpu_buffer, &head_page);
				cpu_buffer->head_page = head_page;
				cpu_buffer->head_page->read = 0;
			}
		}

		/*
		 * If the tail page is still the same as what we think
		 * it is, then it is up to us to update the tail
		 * pointer.
		 */
		if (tail_page == cpu_buffer->tail_page) {
			local_set(&next_page->write, 0);
			local_set(&next_page->page->commit, 0);
			cpu_buffer->tail_page = next_page;

			/* reread the time stamp */
			*ts = ring_buffer_time_stamp(cpu_buffer->cpu);
			cpu_buffer->tail_page->page->time_stamp = *ts;
		}

		/*
		 * The actual tail page has moved forward.
		 */
		if (tail < BUF_PAGE_SIZE) {
			/* Mark the rest of the page with padding */
			event = __rb_page_index(tail_page, tail);
			event->type = RINGBUF_TYPE_PADDING;
		}

		if (tail <= BUF_PAGE_SIZE)
			/* Set the write back to the previous setting */
			local_set(&tail_page->write, tail);

		/*
		 * If this was a commit entry that failed,
		 * increment that too
		 */
		if (tail_page == cpu_buffer->commit_page &&
		    tail == rb_commit_index(cpu_buffer)) {
			rb_set_commit_to_write(cpu_buffer);
		}

		__raw_spin_unlock(&cpu_buffer->lock);
		local_irq_restore(flags);

		/* fail and let the caller try again */
		return ERR_PTR(-EAGAIN);
	}

	/* We reserved something on the buffer */

	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
		return NULL;

	event = __rb_page_index(tail_page, tail);
	rb_update_event(event, type, length);

	/*
	 * If this is a commit and the tail is zero, then update
	 * this page's time stamp.
	 */
	if (!tail && rb_is_commit(cpu_buffer, event))
		cpu_buffer->commit_page->page->time_stamp = *ts;

	return event;

 out_unlock:
	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);
	return NULL;
}
static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;
	int ret;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       (unsigned long long)*delta,
		       (unsigned long long)*ts,
		       (unsigned long long)cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big; we need to add a
	 * new timestamp.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -EBUSY;

	if (PTR_ERR(event) == -EAGAIN)
		return -EAGAIN;

	/* Only a committed time event can update the write stamp */
	if (rb_is_commit(cpu_buffer, event)) {
		/*
		 * If this is the first on the page, then we need to
		 * update the page itself, and just put in a zero.
		 */
		if (rb_event_index(event)) {
			event->time_delta = *delta & TS_MASK;
			event->array[0] = *delta >> TS_SHIFT;
		} else {
			cpu_buffer->commit_page->page->time_stamp = *ts;
			event->time_delta = 0;
			event->array[0] = 0;
		}
		cpu_buffer->write_stamp = *ts;
		/* let the caller know this was the commit */
		ret = 1;
	} else {
		/* Darn, this is just wasted space */
		event->time_delta = 0;
		event->array[0] = 0;
		ret = 0;
	}

	*delta = 0;

	return ret;
}
static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned type, unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta;
	int commit = 0;
	int nr_loops = 0;

 again:
	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
		return NULL;

	ts = ring_buffer_time_stamp(cpu_buffer->cpu);

	/*
	 * Only the first commit can update the timestamp.
	 * Yes there is a race here. If an interrupt comes in
	 * just after the conditional and it traces too, then it
	 * will also check the deltas. More than one timestamp may
	 * also be made. But only the entry that did the actual
	 * commit will be something other than zero.
	 */
	if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
	    rb_page_write(cpu_buffer->tail_page) ==
	    rb_commit_index(cpu_buffer)) {

		delta = ts - cpu_buffer->write_stamp;

		/* make sure this delta is calculated here */
		barrier();

		/* Did the write stamp get updated already? */
		if (unlikely(ts < cpu_buffer->write_stamp))
			delta = 0;

		if (test_time_stamp(delta)) {

			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);

			if (commit == -EBUSY)
				return NULL;

			if (commit == -EAGAIN)
				goto again;

			RB_WARN_ON(cpu_buffer, commit < 0);
		}
	} else
		/* Non commits have zero deltas */
		delta = 0;

	event = __rb_reserve_next(cpu_buffer, type, length, &ts);
	if (PTR_ERR(event) == -EAGAIN)
		goto again;

	if (!event) {
		if (unlikely(commit))
			/*
			 * Ouch! We needed a timestamp and it was committed. But
			 * we didn't get our event reserved.
			 */
			rb_set_commit_to_write(cpu_buffer);
		return NULL;
	}

	/*
	 * If the timestamp was committed, make the commit our entry
	 * now so that we will update it when needed.
	 */
	if (commit)
		rb_set_commit_event(cpu_buffer, event);
	else if (!rb_is_commit(cpu_buffer, event))
		delta = 0;

	event->time_delta = delta;

	return event;
}

static DEFINE_PER_CPU(int, rb_need_resched);
/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 * @flags: a pointer to save the interrupt flags
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer,
			 unsigned long length,
			 unsigned long *flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	if (atomic_read(&buffer->record_disabled))
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	length = rb_calculate_event_length(length);
	if (length > BUF_PAGE_SIZE)
		goto out;

	event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
	if (!event)
		goto out;

	/*
	 * Need to store resched state on this cpu.
	 * Only the first needs to.
	 */
	if (preempt_count() == 1)
		per_cpu(rb_need_resched, cpu) = resched;

	return event;

 out:
	ftrace_preempt_enable(resched);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	cpu_buffer->entries++;

	/* Only process further if we own the commit */
	if (!rb_is_commit(cpu_buffer, event))
		return;

	cpu_buffer->write_stamp += event->time_delta;

	rb_set_commit_to_write(cpu_buffer);
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event,
			      unsigned long flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
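
/*
 * Usage sketch (illustrative, not part of the original file): reserve
 * space for a small record, fill it in place, then commit it. The
 * struct my_entry type is hypothetical.
 */
#if 0
struct my_entry { int pid; char comm[16]; };

static int my_trace(struct ring_buffer *rb)
{
	struct ring_buffer_event *event;
	struct my_entry *entry;
	unsigned long flags;

	event = ring_buffer_lock_reserve(rb, sizeof(*entry), &flags);
	if (!event)
		return -EBUSY;

	entry = ring_buffer_event_data(event);
	entry->pid = current->pid;
	memcpy(entry->comm, current->comm, sizeof(entry->comm));

	return ring_buffer_unlock_commit(rb, event, flags);
}
#endif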
/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long event_length;
	void *body;
	int ret = -EBUSY;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return -EBUSY;

	if (atomic_read(&buffer->record_disabled))
		return -EBUSY;

	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	event_length = rb_calculate_event_length(length);
	event = rb_reserve_next_event(cpu_buffer,
				      RINGBUF_TYPE_DATA, event_length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
 out:
	ftrace_preempt_enable(resched);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_write);
static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = cpu_buffer->head_page;
	struct buffer_page *commit = cpu_buffer->commit_page;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}
/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable);

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);

/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	return cpu_buffer->entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);

/**
 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	return cpu_buffer->overrun;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);

/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += cpu_buffer->entries;
	}

	return entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries);

/**
 * ring_buffer_overruns - get the number of overruns in the buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += cpu_buffer->overrun;
	}

	return overruns;
}
EXPORT_SYMBOL_GPL(ring_buffer_overruns);
static void rb_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	if (list_empty(&cpu_buffer->reader_page->list)) {
		iter->head_page = cpu_buffer->head_page;
		iter->head = cpu_buffer->head_page->read;
	} else {
		iter->head_page = cpu_buffer->reader_page;
		iter->head = cpu_buffer->reader_page->read;
	}
	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->page->time_stamp;
}
/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_iter_reset(iter);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);

/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		     struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		cpu_buffer->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		cpu_buffer->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}
static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = NULL;
	unsigned long flags;
	int nr_loops = 0;

	local_irq_save(flags);
	__raw_spin_lock(&cpu_buffer->lock);

 again:
	/*
	 * This should normally only loop twice. But because the
	 * start of the reader inserts an empty page, it causes
	 * a case where we will loop three times. There should be no
	 * reason to loop four times (that I know of).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
		reader = NULL;
		goto out;
	}

	reader = cpu_buffer->reader_page;

	/* If there's more to read, return this page */
	if (cpu_buffer->reader_page->read < rb_page_size(reader))
		goto out;

	/* Never should we have an index greater than the size */
	if (RB_WARN_ON(cpu_buffer,
		       cpu_buffer->reader_page->read > rb_page_size(reader)))
		goto out;

	/* check if we caught up to the tail */
	reader = NULL;
	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
		goto out;

	/*
	 * Splice the empty reader page into the list around the head.
	 * Reset the reader page to size zero.
	 */

	reader = cpu_buffer->head_page;
	cpu_buffer->reader_page->list.next = reader->list.next;
	cpu_buffer->reader_page->list.prev = reader->list.prev;

	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);

	/* Make the reader page now replace the head */
	reader->list.prev->next = &cpu_buffer->reader_page->list;
	reader->list.next->prev = &cpu_buffer->reader_page->list;

	/*
	 * If the tail is on the reader, then we must set the head
	 * to the inserted page, otherwise we set it one before.
	 */
	cpu_buffer->head_page = cpu_buffer->reader_page;

	if (cpu_buffer->commit_page != reader)
		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

	/* Finally update the reader page to the new head */
	cpu_buffer->reader_page = reader;
	rb_reset_reader_page(cpu_buffer);

	goto again;

 out:
	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	return reader;
}
static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	if (RB_WARN_ON(cpu_buffer, !reader))
		return;

	event = rb_reader_event(cpu_buffer);

	if (event->type == RINGBUF_TYPE_DATA)
		cpu_buffer->entries--;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader_page->read += length;
}
static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		if (RB_WARN_ON(buffer,
			       iter->head_page == cpu_buffer->commit_page))
			return;
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	if (RB_WARN_ON(cpu_buffer,
		       (iter->head_page == cpu_buffer->commit_page) &&
		       (iter->head + length > rb_commit_index(cpu_buffer))))
		return;

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_advance_iter(iter);
}
static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written. The max times
	 * that this can happen is the number of nested interrupts we
	 * can have. Nesting 10 deep of interrupts is clearly
	 * an anomaly.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
		return NULL;

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		RB_WARN_ON(cpu_buffer, 1);
		rb_advance_reader(cpu_buffer);
		return NULL;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_peek);
static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	if (ring_buffer_iter_empty(iter))
		return NULL;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written. The max times
	 * that this can happen is the number of nested interrupts we
	 * can have. Nesting 10 deep of interrupts is clearly
	 * an anomaly.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
		return NULL;

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	event = rb_iter_head_event(iter);

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		rb_inc_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_buffer_peek(buffer, cpu, ts);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}
/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	event = rb_buffer_peek(buffer, cpu, ts);
	if (!event)
		goto out;

	rb_advance_reader(cpu_buffer);

 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
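
/*
 * Usage sketch (illustrative, not part of the original file): drain one
 * CPU's events with the consuming read. my_handle_event() is
 * hypothetical.
 */
#if 0
static void my_drain_cpu(struct ring_buffer *rb, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(rb, cpu, &ts)))
		my_handle_event(ring_buffer_event_data(event),
				ring_buffer_event_length(event), ts);
}
#endif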
/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	__raw_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	__raw_spin_unlock(&cpu_buffer->lock);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);
/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
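
/*
 * Usage sketch (illustrative, not part of the original file): walk a
 * CPU buffer without consuming it. Recording on that CPU stays
 * disabled for the duration of the read.
 */
#if 0
static void my_dump_cpu(struct ring_buffer *rb, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(rb, cpu);
	if (!iter)
		return;
	while ((event = ring_buffer_read(iter, &ts)))
		pr_info("event of %u bytes at %llu\n",
			ring_buffer_event_length(event),
			(unsigned long long)ts);
	ring_buffer_read_finish(iter);
}
#endif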
/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	rb_advance_iter(iter);
 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);
static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	cpu_buffer->overrun = 0;
	cpu_buffer->entries = 0;
}
/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	__raw_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	__raw_spin_unlock(&cpu_buffer->lock);

	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);
/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		if (!rb_per_cpu_empty(cpu_buffer))
			return 0;
	}
	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);
/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	cpu_buffer = buffer->buffers[cpu];
	return rb_per_cpu_empty(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		return -EINVAL;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->pages != buffer_b->pages)
		return -EINVAL;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
			      struct buffer_data_page *bpage)
{
	struct ring_buffer_event *event;
	unsigned long head;

	__raw_spin_lock(&cpu_buffer->lock);
	for (head = 0; head < local_read(&bpage->commit);
	     head += rb_event_length(event)) {

		event = __rb_data_page_index(bpage, head);
		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
			break;	/* don't return with the lock held */
		/* Only count data entries */
		if (event->type != RINGBUF_TYPE_DATA)
			continue;
		cpu_buffer->entries--;
	}
	__raw_spin_unlock(&cpu_buffer->lock);
}
/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
{
	unsigned long addr;
	struct buffer_data_page *bpage;

	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		return NULL;

	bpage = (void *)addr;

	return bpage;
}

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}
/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
 *	if (ret)
 *		process_page(rpage);
 *
 * When @full is set, the function will not return true unless
 * the writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  1 if data has been transferred
 *  0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	unsigned long flags;
	int ret = 0;

	if (!data_page)
		return 0;

	bpage = *data_page;
	if (!bpage)
		return 0;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	/*
	 * rb_buffer_peek will get the next ring buffer if
	 * the current reader page is empty.
	 */
	event = rb_buffer_peek(buffer, cpu, NULL);
	if (!event)
		goto out;

	/* check for data */
	if (!local_read(&cpu_buffer->reader_page->page->commit))
		goto out;
	/*
	 * If the writer is already off of the read page, then simply
	 * switch the read page with the given page. Otherwise
	 * we need to copy the data from the reader to the writer.
	 */
	if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
		unsigned int read = cpu_buffer->reader_page->read;

		if (full)
			goto out;
		/* The writer is still on the reader page, we must copy */
		bpage = cpu_buffer->reader_page->page;
		/* the regions may overlap, so use memmove */
		memmove(bpage->data,
			cpu_buffer->reader_page->page->data + read,
			local_read(&bpage->commit) - read);

		/* consume what was read */
		cpu_buffer->reader_page->read += read;

	} else {
		/* swap the pages */
		rb_init_page(bpage);
		bpage = cpu_buffer->reader_page->page;
		cpu_buffer->reader_page->page = *data_page;
		cpu_buffer->reader_page->read = 0;
		*data_page = bpage;
	}
	ret = 1;

	/* update the entry counter */
	rb_remove_entries(cpu_buffer, bpage);
 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return ret;
}
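
/*
 * Usage sketch (illustrative, not part of the original file): pull
 * whole pages out of one CPU's buffer, as the comment above describes.
 * my_process_page() is hypothetical.
 */
#if 0
static void my_splice_cpu(struct ring_buffer *rb, int cpu)
{
	void *rpage = ring_buffer_alloc_read_page(rb);

	if (!rpage)
		return;
	while (ring_buffer_read_page(rb, &rpage, cpu, 0))
		my_process_page(rpage);
	ring_buffer_free_read_page(rb, rpage);
}
#endif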
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	long *p = filp->private_data;
	char buf[64];
	int r;

	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
		r = sprintf(buf, "permanently disabled\n");
	else
		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	long *p = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	if (val)
		set_bit(RB_BUFFERS_ON_BIT, p);
	else
		clear_bit(RB_BUFFERS_ON_BIT, p);

	(*ppos)++;

	return cnt;
}
static struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
};

static __init int rb_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_on", 0644, d_tracer,
				    &ring_buffer_flags, &rb_simple_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_on' entry\n");

	return 0;
}

fs_initcall(rb_init_debugfs);