1 /*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6 #include <linux/ftrace_event.h>
7 #include <linux/ring_buffer.h>
8 #include <linux/trace_clock.h>
9 #include <linux/trace_seq.h>
10 #include <linux/spinlock.h>
11 #include <linux/irq_work.h>
12 #include <linux/debugfs.h>
13 #include <linux/uaccess.h>
14 #include <linux/hardirq.h>
15 #include <linux/kthread.h> /* for self test */
16 #include <linux/kmemcheck.h>
17 #include <linux/module.h>
18 #include <linux/percpu.h>
19 #include <linux/mutex.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/init.h>
23 #include <linux/hash.h>
24 #include <linux/list.h>
25 #include <linux/cpu.h>
26 #include <linux/fs.h>
27
28 #include <asm/local.h>
29
30 static void update_pages_handler(struct work_struct *work);
31
32 /*
33 * The ring buffer header is special. We must manually keep it up to date.
34 */
35 int ring_buffer_print_entry_header(struct trace_seq *s)
36 {
37 int ret;
38
39 ret = trace_seq_puts(s, "# compressed entry header\n");
40 ret = trace_seq_puts(s, "\ttype_len : 5 bits\n");
41 ret = trace_seq_puts(s, "\ttime_delta : 27 bits\n");
42 ret = trace_seq_puts(s, "\tarray : 32 bits\n");
43 ret = trace_seq_putc(s, '\n');
44 ret = trace_seq_printf(s, "\tpadding : type == %d\n",
45 RINGBUF_TYPE_PADDING);
46 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
47 RINGBUF_TYPE_TIME_EXTEND);
48 ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
49 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
50
51 return ret;
52 }
53
54 /*
55 * The ring buffer is made up of a list of pages. A separate list of pages is
56 * allocated for each CPU. A writer may only write to a buffer that is
57 * associated with the CPU it is currently executing on. A reader may read
58 * from any per cpu buffer.
59 *
60 * The reader is special. For each per cpu buffer, the reader has its own
61 * reader page. When a reader has read the entire reader page, this reader
62 * page is swapped with another page in the ring buffer.
63 *
64 * Now, as long as the writer is off the reader page, the reader can do
65 * whatever it wants with that page. The writer will never write to that page
66 * again (as long as it is out of the ring buffer).
67 *
68 * Here's some silly ASCII art.
69 *
70 * +------+
71 * |reader| RING BUFFER
72 * |page |
73 * +------+ +---+ +---+ +---+
74 * | |-->| |-->| |
75 * +---+ +---+ +---+
76 * ^ |
77 * | |
78 * +---------------+
79 *
80 *
81 * +------+
82 * |reader| RING BUFFER
83 * |page |------------------v
84 * +------+ +---+ +---+ +---+
85 * | |-->| |-->| |
86 * +---+ +---+ +---+
87 * ^ |
88 * | |
89 * +---------------+
90 *
91 *
92 * +------+
93 * |reader| RING BUFFER
94 * |page |------------------v
95 * +------+ +---+ +---+ +---+
96 * ^ | |-->| |-->| |
97 * | +---+ +---+ +---+
98 * | |
99 * | |
100 * +------------------------------+
101 *
102 *
103 * +------+
104 * |buffer| RING BUFFER
105 * |page |------------------v
106 * +------+ +---+ +---+ +---+
107 * ^ | | | |-->| |
108 * | New +---+ +---+ +---+
109 * | Reader------^ |
110 * | page |
111 * +------------------------------+
112 *
113 *
114 * After we make this swap, the reader can hand this page off to the splice
115 * code and be done with it. It can even allocate a new page if it needs to
116 * and swap that into the ring buffer.
117 *
118 * We will be using cmpxchg soon to make all this lockless.
119 *
120 */
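/*
 * A minimal user-space sketch of the reader-page swap described above
 * (not part of this file, kept under #if 0): a plain circular doubly
 * linked list stands in for the flag-tagged kernel list, and the names
 * demo_page, demo_link and demo_swap_reader_page are hypothetical.
 */
#if 0
#include <stdio.h>

struct demo_page {
	struct demo_page *next;
	struct demo_page *prev;
	int id;
};

/* Link pages[0..n-1] into a circular list and return the first page. */
static struct demo_page *demo_link(struct demo_page *pages, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		pages[i].id = i;
		pages[i].next = &pages[(i + 1) % n];
		pages[i].prev = &pages[(i + n - 1) % n];
	}
	return &pages[0];
}

/*
 * Swap the external reader page with the current head page: the old
 * head leaves the ring and becomes the reader's private page, while
 * the old reader page takes its place in the ring.
 */
static struct demo_page *demo_swap_reader_page(struct demo_page **head,
					       struct demo_page *reader)
{
	struct demo_page *old_head = *head;

	reader->next = old_head->next;
	reader->prev = old_head->prev;
	old_head->prev->next = reader;
	old_head->next->prev = reader;

	*head = reader->next;			/* head moves to the next page */
	old_head->next = old_head->prev = old_head;

	return old_head;			/* now owned only by the reader */
}

int main(void)
{
	struct demo_page ring[3], reader = { .id = 99 };
	struct demo_page *head = demo_link(ring, 3);
	struct demo_page *mine = demo_swap_reader_page(&head, &reader);

	printf("reader now owns page %d, new head is page %d\n",
	       mine->id, head->id);
	return 0;
}
#endif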
121
122 /*
123 * A fast way to enable or disable all ring buffers is to
124 * call tracing_on or tracing_off. Turning off the ring buffers
125 * prevents all ring buffers from being recorded to.
126 * Turning this switch on, makes it OK to write to the
127 * ring buffer, if the ring buffer is enabled itself.
128 *
129 * There are three layers that must be on in order to write
130 * to the ring buffer.
131 *
132 * 1) This global flag must be set.
133 * 2) The ring buffer must be enabled for recording.
134 * 3) The per cpu buffer must be enabled for recording.
135 *
136 * In case of an anomaly, this global flag has a bit set that
137 * will permanently disable all ring buffers.
138 */
139
140 /*
141 * Global flag to disable all recording to ring buffers
142 * This has two bits: ON, DISABLED
143 *
144 * ON DISABLED
145 * ---- ----------
146 * 0 0 : ring buffers are off
147 * 1 0 : ring buffers are on
148 * X 1 : ring buffers are permanently disabled
149 */
150
151 enum {
152 RB_BUFFERS_ON_BIT = 0,
153 RB_BUFFERS_DISABLED_BIT = 1,
154 };
155
156 enum {
157 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
158 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
159 };
160
161 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
162
163 /* Used for individual buffers (after the counter) */
164 #define RB_BUFFER_OFF (1 << 20)
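/*
 * Illustrative sketch of the three layers described above (a hypothetical
 * helper, not used by this file, kept under #if 0): recording is possible
 * only when the global flags are exactly RB_BUFFERS_ON (the DISABLED bit
 * permanently vetoes everything) and neither the buffer nor the per-cpu
 * buffer has its record_disabled counter raised.
 */
#if 0
static bool rb_demo_recording_allowed(struct ring_buffer *buffer,
				      struct ring_buffer_per_cpu *cpu_buffer)
{
	if (ring_buffer_flags != RB_BUFFERS_ON)		/* layer 1: global */
		return false;
	if (atomic_read(&buffer->record_disabled))	/* layer 2: buffer */
		return false;
	if (atomic_read(&cpu_buffer->record_disabled))	/* layer 3: this cpu */
		return false;
	return true;
}
#endif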
165
166 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
167
168 /**
169 * tracing_off_permanent - permanently disable ring buffers
170 *
171 * This function, once called, will disable all ring buffers
172 * permanently.
173 */
174 void tracing_off_permanent(void)
175 {
176 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
177 }
178
179 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
180 #define RB_ALIGNMENT 4U
181 #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
182 #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
183
184 #ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
185 # define RB_FORCE_8BYTE_ALIGNMENT 0
186 # define RB_ARCH_ALIGNMENT RB_ALIGNMENT
187 #else
188 # define RB_FORCE_8BYTE_ALIGNMENT 1
189 # define RB_ARCH_ALIGNMENT 8U
190 #endif
191
192 #define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT)
193
194 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
195 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
196
197 enum {
198 RB_LEN_TIME_EXTEND = 8,
199 RB_LEN_TIME_STAMP = 16,
200 };
201
202 #define skip_time_extend(event) \
203 ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
204
205 static inline int rb_null_event(struct ring_buffer_event *event)
206 {
207 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
208 }
209
210 static void rb_event_set_padding(struct ring_buffer_event *event)
211 {
212 /* padding has a NULL time_delta */
213 event->type_len = RINGBUF_TYPE_PADDING;
214 event->time_delta = 0;
215 }
216
217 static unsigned
218 rb_event_data_length(struct ring_buffer_event *event)
219 {
220 unsigned length;
221
222 if (event->type_len)
223 length = event->type_len * RB_ALIGNMENT;
224 else
225 length = event->array[0];
226 return length + RB_EVNT_HDR_SIZE;
227 }
228
229 /*
230 * Return the length of the given event. Will return
231 * the length of the time extend if the event is a
232 * time extend.
233 */
234 static inline unsigned
235 rb_event_length(struct ring_buffer_event *event)
236 {
237 switch (event->type_len) {
238 case RINGBUF_TYPE_PADDING:
239 if (rb_null_event(event))
240 /* undefined */
241 return -1;
242 return event->array[0] + RB_EVNT_HDR_SIZE;
243
244 case RINGBUF_TYPE_TIME_EXTEND:
245 return RB_LEN_TIME_EXTEND;
246
247 case RINGBUF_TYPE_TIME_STAMP:
248 return RB_LEN_TIME_STAMP;
249
250 case RINGBUF_TYPE_DATA:
251 return rb_event_data_length(event);
252 default:
253 BUG();
254 }
255 /* not hit */
256 return 0;
257 }
258
259 /*
260 * Return total length of time extend and data,
261 * or just the event length for all other events.
262 */
263 static inline unsigned
264 rb_event_ts_length(struct ring_buffer_event *event)
265 {
266 unsigned len = 0;
267
268 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
269 /* time extends include the data event after it */
270 len = RB_LEN_TIME_EXTEND;
271 event = skip_time_extend(event);
272 }
273 return len + rb_event_length(event);
274 }
275
276 /**
277 * ring_buffer_event_length - return the length of the event
278 * @event: the event to get the length of
279 *
280 * Returns the size of the data load of a data event.
281 * If the event is something other than a data event, it
282 * returns the size of the event itself. With the exception
283 * of a TIME EXTEND, where it still returns the size of the
284 * data load of the data event after it.
285 */
286 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
287 {
288 unsigned length;
289
290 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
291 event = skip_time_extend(event);
292
293 length = rb_event_length(event);
294 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
295 return length;
296 length -= RB_EVNT_HDR_SIZE;
297 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
298 length -= sizeof(event->array[0]);
299 return length;
300 }
301 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
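/*
 * Worked example of the length encoding above (a sketch, assuming
 * RB_ALIGNMENT == 4 and RB_EVNT_HDR_SIZE == 4):
 *
 * - A 12-byte payload fits the "small" form: type_len = 3, so
 *   rb_event_data_length() returns 3 * 4 + 4 = 16 bytes for the whole
 *   event and ring_buffer_event_length() reports 12 bytes of data.
 *
 * - A larger payload (or any payload on an arch that sets
 *   RB_FORCE_8BYTE_ALIGNMENT) uses type_len = 0: the length is carried
 *   in array[0], the data starts at array[1], and
 *   ring_buffer_event_length() subtracts the extra array[0] word
 *   before returning the data size.
 */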
302
303 /* inline for ring buffer fast paths */
304 static void *
305 rb_event_data(struct ring_buffer_event *event)
306 {
307 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
308 event = skip_time_extend(event);
309 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
310 /* If length is in len field, then array[0] has the data */
311 if (event->type_len)
312 return (void *)&event->array[0];
313 /* Otherwise length is in array[0] and array[1] has the data */
314 return (void *)&event->array[1];
315 }
316
317 /**
318 * ring_buffer_event_data - return the data of the event
319 * @event: the event to get the data from
320 */
321 void *ring_buffer_event_data(struct ring_buffer_event *event)
322 {
323 return rb_event_data(event);
324 }
325 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
326
327 #define for_each_buffer_cpu(buffer, cpu) \
328 for_each_cpu(cpu, buffer->cpumask)
329
330 #define TS_SHIFT 27
331 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
332 #define TS_DELTA_TEST (~TS_MASK)
333
334 /* Flag when events were overwritten */
335 #define RB_MISSED_EVENTS (1 << 31)
336 /* Missed count stored at end */
337 #define RB_MISSED_STORED (1 << 30)
338
339 struct buffer_data_page {
340 u64 time_stamp; /* page time stamp */
341 local_t commit; /* write committed index */
342 unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */
343 };
344
345 /*
346 * Note, the buffer_page list must be first. The buffer pages
347 * are allocated in cache lines, which means that each buffer
348 * page will be at the beginning of a cache line, and thus
349 * the least significant bits will be zero. We use this to
350 * add flags in the list struct pointers, to make the ring buffer
351 * lockless.
352 */
353 struct buffer_page {
354 struct list_head list; /* list of buffer pages */
355 local_t write; /* index for next write */
356 unsigned read; /* index for next read */
357 local_t entries; /* entries on this page */
358 unsigned long real_end; /* real end of data */
359 struct buffer_data_page *page; /* Actual data page */
360 };
361
362 /*
363 * The buffer page counters, write and entries, must be reset
364 * atomically when crossing page boundaries. To synchronize this
365 * update, two counters are inserted into the number. One is
366 * the actual counter for the write position or count on the page.
367 *
368 * The other is a counter of updaters. Before an update happens
369 * the update partition of the counter is incremented. This will
370 * allow the updater to update the counter atomically.
371 *
372 * The counter is 20 bits, and the state data is 12.
373 */
374 #define RB_WRITE_MASK 0xfffff
375 #define RB_WRITE_INTCNT (1 << 20)
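/*
 * Worked example of the packing above (a sketch): if a page's write
 * field holds 0x00300040, the low 20 bits (0x40) are the write index
 * and the upper bits (0x3) count the updaters that have added
 * RB_WRITE_INTCNT. Adding RB_WRITE_INTCNT never disturbs the index,
 * and masking recovers it:
 *
 *	index    = val & RB_WRITE_MASK;		-> 0x40
 *	updaters = val >> 20;			-> 0x3
 */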
376
377 static void rb_init_page(struct buffer_data_page *bpage)
378 {
379 local_set(&bpage->commit, 0);
380 }
381
382 /**
383 * ring_buffer_page_len - the size of data on the page.
384 * @page: The page to read
385 *
386 * Returns the amount of data on the page, including buffer page header.
387 */
388 size_t ring_buffer_page_len(void *page)
389 {
390 return local_read(&((struct buffer_data_page *)page)->commit)
391 + BUF_PAGE_HDR_SIZE;
392 }
393
394 /*
395 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
396 * this issue out.
397 */
398 static void free_buffer_page(struct buffer_page *bpage)
399 {
400 free_page((unsigned long)bpage->page);
401 kfree(bpage);
402 }
403
404 /*
405 * We need to fit the time_stamp delta into 27 bits.
406 */
407 static inline int test_time_stamp(u64 delta)
408 {
409 if (delta & TS_DELTA_TEST)
410 return 1;
411 return 0;
412 }
413
414 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
415
416 /* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
417 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
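/*
 * Worked example (assuming 4K pages and an 8-byte local_t, as on most
 * 64-bit builds): BUF_PAGE_HDR_SIZE covers the 8-byte time stamp plus
 * the 8-byte commit counter, so BUF_PAGE_SIZE = 4096 - 16 = 4080 bytes
 * of usable space per page and BUF_MAX_DATA_SIZE = 4080 - 8 = 4072
 * bytes for a single event's payload.
 */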
418
419 int ring_buffer_print_page_header(struct trace_seq *s)
420 {
421 struct buffer_data_page field;
422 int ret;
423
424 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
425 "offset:0;\tsize:%u;\tsigned:%u;\n",
426 (unsigned int)sizeof(field.time_stamp),
427 (unsigned int)is_signed_type(u64));
428
429 ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
430 "offset:%u;\tsize:%u;\tsigned:%u;\n",
431 (unsigned int)offsetof(typeof(field), commit),
432 (unsigned int)sizeof(field.commit),
433 (unsigned int)is_signed_type(long));
434
435 ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
436 "offset:%u;\tsize:%u;\tsigned:%u;\n",
437 (unsigned int)offsetof(typeof(field), commit),
438 1,
439 (unsigned int)is_signed_type(long));
440
441 ret = trace_seq_printf(s, "\tfield: char data;\t"
442 "offset:%u;\tsize:%u;\tsigned:%u;\n",
443 (unsigned int)offsetof(typeof(field), data),
444 (unsigned int)BUF_PAGE_SIZE,
445 (unsigned int)is_signed_type(char));
446
447 return ret;
448 }
449
450 struct rb_irq_work {
451 struct irq_work work;
452 wait_queue_head_t waiters;
453 bool waiters_pending;
454 };
455
456 /*
457 * If head_page == tail_page && head == tail, then the buffer is empty.
458 */
459 struct ring_buffer_per_cpu {
460 int cpu;
461 atomic_t record_disabled;
462 struct ring_buffer *buffer;
463 raw_spinlock_t reader_lock; /* serialize readers */
464 arch_spinlock_t lock;
465 struct lock_class_key lock_key;
466 unsigned int nr_pages;
467 struct list_head *pages;
468 struct buffer_page *head_page; /* read from head */
469 struct buffer_page *tail_page; /* write to tail */
470 struct buffer_page *commit_page; /* committed pages */
471 struct buffer_page *reader_page;
472 unsigned long lost_events;
473 unsigned long last_overrun;
474 local_t entries_bytes;
475 local_t entries;
476 local_t overrun;
477 local_t commit_overrun;
478 local_t dropped_events;
479 local_t committing;
480 local_t commits;
481 unsigned long read;
482 unsigned long read_bytes;
483 u64 write_stamp;
484 u64 read_stamp;
485 /* ring buffer pages to update, > 0 to add, < 0 to remove */
486 int nr_pages_to_update;
487 struct list_head new_pages; /* new pages to add */
488 struct work_struct update_pages_work;
489 struct completion update_done;
490
491 struct rb_irq_work irq_work;
492 };
493
494 struct ring_buffer {
495 unsigned flags;
496 int cpus;
497 atomic_t record_disabled;
498 atomic_t resize_disabled;
499 cpumask_var_t cpumask;
500
501 struct lock_class_key *reader_lock_key;
502
503 struct mutex mutex;
504
505 struct ring_buffer_per_cpu **buffers;
506
507 #ifdef CONFIG_HOTPLUG_CPU
508 struct notifier_block cpu_notify;
509 #endif
510 u64 (*clock)(void);
511
512 struct rb_irq_work irq_work;
513 };
514
515 struct ring_buffer_iter {
516 struct ring_buffer_per_cpu *cpu_buffer;
517 unsigned long head;
518 struct buffer_page *head_page;
519 struct buffer_page *cache_reader_page;
520 unsigned long cache_read;
521 u64 read_stamp;
522 };
523
524 /*
525 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
526 *
527 * Called as an irq_work callback to wake up any task that is blocked on the
528 * ring buffer waiters queue.
529 */
530 static void rb_wake_up_waiters(struct irq_work *work)
531 {
532 struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
533
534 wake_up_all(&rbwork->waiters);
535 }
536
537 /**
538 * ring_buffer_wait - wait for input to the ring buffer
539 * @buffer: buffer to wait on
540 * @cpu: the cpu buffer to wait on
541 *
542 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
543 * as data is added to any of the @buffer's cpu buffers. Otherwise
544 * it will wait for data to be added to a specific cpu buffer.
545 */
546 void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
547 {
548 struct ring_buffer_per_cpu *cpu_buffer;
549 DEFINE_WAIT(wait);
550 struct rb_irq_work *work;
551
552 /*
553 * Depending on what the caller is waiting for, either any
554 * data in any cpu buffer, or a specific buffer, put the
555 * caller on the appropriate wait queue.
556 */
557 if (cpu == RING_BUFFER_ALL_CPUS)
558 work = &buffer->irq_work;
559 else {
560 cpu_buffer = buffer->buffers[cpu];
561 work = &cpu_buffer->irq_work;
562 }
563
564
565 prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
566
567 /*
568 * The events can happen in critical sections where
569 * checking a work queue can cause deadlocks.
570 * After adding a task to the queue, this flag is set
571 * only to notify events to try to wake up the queue
572 * using irq_work.
573 *
574 * We don't clear it even if the buffer is no longer
575 * empty. The flag only causes the next event to run
576 * irq_work to do the work queue wake up. The worst
577 * that can happen if we race with !trace_empty() is that
578 * an event will cause an irq_work to try to wake up
579 * an empty queue.
580 *
581 * There's no reason to protect this flag either, as
582 * the work queue and irq_work logic will do the necessary
583 * synchronization for the wake ups. The only thing
584 * that is necessary is that the wake up happens after
585 * a task has been queued. It's OK for spurious wake ups.
586 */
587 work->waiters_pending = true;
588
589 if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
590 (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
591 schedule();
592
593 finish_wait(&work->waiters, &wait);
594 }
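/*
 * Illustrative usage sketch (hypothetical caller, assuming the
 * ring_buffer_consume() API declared in <linux/ring_buffer.h>): a
 * consumer that sleeps until data arrives on one cpu buffer and then
 * drains whatever events are available.
 */
#if 0
static void demo_consume(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	for (;;) {
		ring_buffer_wait(buffer, cpu);	/* sleep until data shows up */
		while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL)))
			pr_info("event of %u bytes at %llu\n",
				ring_buffer_event_length(event),
				(unsigned long long)ts);
	}
}
#endif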
595
596 /**
597 * ring_buffer_poll_wait - poll on buffer input
598 * @buffer: buffer to wait on
599 * @cpu: the cpu buffer to wait on
600 * @filp: the file descriptor
601 * @poll_table: The poll descriptor
602 *
603 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
604 * as data is added to any of the @buffer's cpu buffers. Otherwise
605 * it will wait for data to be added to a specific cpu buffer.
606 *
607 * Returns POLLIN | POLLRDNORM if data exists in the buffers,
608 * zero otherwise.
609 */
610 int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
611 struct file *filp, poll_table *poll_table)
612 {
613 struct ring_buffer_per_cpu *cpu_buffer;
614 struct rb_irq_work *work;
615
616 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
617 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
618 return POLLIN | POLLRDNORM;
619
620 if (cpu == RING_BUFFER_ALL_CPUS)
621 work = &buffer->irq_work;
622 else {
623 if (!cpumask_test_cpu(cpu, buffer->cpumask))
624 return -EINVAL;
625
626 cpu_buffer = buffer->buffers[cpu];
627 work = &cpu_buffer->irq_work;
628 }
629
630 work->waiters_pending = true;
631 poll_wait(filp, &work->waiters, poll_table);
632
633 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
634 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
635 return POLLIN | POLLRDNORM;
636 return 0;
637 }
638
639 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
640 #define RB_WARN_ON(b, cond) \
641 ({ \
642 int _____ret = unlikely(cond); \
643 if (_____ret) { \
644 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
645 struct ring_buffer_per_cpu *__b = \
646 (void *)b; \
647 atomic_inc(&__b->buffer->record_disabled); \
648 } else \
649 atomic_inc(&b->record_disabled); \
650 WARN_ON(1); \
651 } \
652 _____ret; \
653 })
654
655 /* Up this if you want to test the TIME_EXTENTS and normalization */
656 #define DEBUG_SHIFT 0
657
658 static inline u64 rb_time_stamp(struct ring_buffer *buffer)
659 {
660 /* shift to debug/test normalization and TIME_EXTENTS */
661 return buffer->clock() << DEBUG_SHIFT;
662 }
663
664 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
665 {
666 u64 time;
667
668 preempt_disable_notrace();
669 time = rb_time_stamp(buffer);
670 preempt_enable_no_resched_notrace();
671
672 return time;
673 }
674 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
675
676 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
677 int cpu, u64 *ts)
678 {
679 /* Just stupid testing the normalize function and deltas */
680 *ts >>= DEBUG_SHIFT;
681 }
682 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
683
684 /*
685 * Making the ring buffer lockless makes things tricky.
686 * Writes only happen on the CPU that they are on, and they
687 * only need to worry about interrupts. Reads can
688 * happen on any CPU.
689 *
690 * The reader page is always off the ring buffer, but when the
691 * reader finishes with a page, it needs to swap its page with
692 * a new one from the buffer. The reader needs to take from
693 * the head (writes go to the tail). But if a writer is in overwrite
694 * mode and wraps, it must push the head page forward.
695 *
696 * Here lies the problem.
697 *
698 * The reader must be careful to replace only the head page, and
699 * not another one. As described at the top of the file in the
700 * ASCII art, the reader sets its old page to point to the next
701 * page after head. It then sets the page after head to point to
702 * the old reader page. But if the writer moves the head page
703 * during this operation, the reader could end up with the tail.
704 *
705 * We use cmpxchg to help prevent this race. We also do something
706 * special with the page before head. We set the LSB to 1.
707 *
708 * When the writer must push the page forward, it will clear the
709 * bit that points to the head page, move the head, and then set
710 * the bit that points to the new head page.
711 *
712 * We also don't want an interrupt coming in and moving the head
713 * page on another writer. Thus we use the second LSB to catch
714 * that too. Thus:
715 *
716 * head->list->prev->next bit 1 bit 0
717 * ------- -------
718 * Normal page 0 0
719 * Points to head page 0 1
720 * New head page 1 0
721 *
722 * Note we can not trust the prev pointer of the head page, because:
723 *
724 * +----+ +-----+ +-----+
725 * | |------>| T |---X--->| N |
726 * | |<------| | | |
727 * +----+ +-----+ +-----+
728 * ^ ^ |
729 * | +-----+ | |
730 * +----------| R |----------+ |
731 * | |<-----------+
732 * +-----+
733 *
734 * Key: ---X--> HEAD flag set in pointer
735 * T Tail page
736 * R Reader page
737 * N Next page
738 *
739 * (see __rb_reserve_next() to see where this happens)
740 *
741 * What the above shows is that the reader just swapped out
742 * the reader page with a page in the buffer, but before it
743 * could make the new header point back to the new page added
744 * it was preempted by a writer. The writer moved forward onto
745 * the new page added by the reader and is about to move forward
746 * again.
747 *
748 * You can see, it is legitimate for the previous pointer of
749 * the head (or any page) not to point back to itself. But only
750 * temporarily.
751 */
752
753 #define RB_PAGE_NORMAL 0UL
754 #define RB_PAGE_HEAD 1UL
755 #define RB_PAGE_UPDATE 2UL
756
757
758 #define RB_FLAG_MASK 3UL
759
760 /* PAGE_MOVED is not part of the mask */
761 #define RB_PAGE_MOVED 4UL
762
763 /*
764 * rb_list_head - remove any bit
765 */
766 static struct list_head *rb_list_head(struct list_head *list)
767 {
768 unsigned long val = (unsigned long)list;
769
770 return (struct list_head *)(val & ~RB_FLAG_MASK);
771 }
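/*
 * Worked example (a sketch): buffer pages are cache-line aligned, so
 * the two low bits of a ->next pointer are free. If the head page
 * lives at 0xffff880012345000, then the page before it stores
 * 0xffff880012345001 in ->next (HEAD flag), or 0xffff880012345002
 * while a writer is moving the head (UPDATE flag); rb_list_head()
 * masks off those low bits to recover the real pointer.
 */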
772
773 /*
774 * rb_is_head_page - test if the given page is the head page
775 *
776 * Because the reader may move the head_page pointer, we can
777 * not trust what the head page is (it may be pointing to
778 * the reader page). But if the next page is a header page,
779 * its flags will be non zero.
780 */
781 static inline int
782 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
783 struct buffer_page *page, struct list_head *list)
784 {
785 unsigned long val;
786
787 val = (unsigned long)list->next;
788
789 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
790 return RB_PAGE_MOVED;
791
792 return val & RB_FLAG_MASK;
793 }
794
795 /*
796 * rb_is_reader_page
797 *
798 * The unique thing about the reader page is that, if the
799 * writer is ever on it, the previous pointer never points
800 * back to the reader page.
801 */
802 static int rb_is_reader_page(struct buffer_page *page)
803 {
804 struct list_head *list = page->list.prev;
805
806 return rb_list_head(list->next) != &page->list;
807 }
808
809 /*
810 * rb_set_list_to_head - set a list_head to be pointing to head.
811 */
812 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
813 struct list_head *list)
814 {
815 unsigned long *ptr;
816
817 ptr = (unsigned long *)&list->next;
818 *ptr |= RB_PAGE_HEAD;
819 *ptr &= ~RB_PAGE_UPDATE;
820 }
821
822 /*
823 * rb_head_page_activate - sets up head page
824 */
825 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
826 {
827 struct buffer_page *head;
828
829 head = cpu_buffer->head_page;
830 if (!head)
831 return;
832
833 /*
834 * Set the previous list pointer to have the HEAD flag.
835 */
836 rb_set_list_to_head(cpu_buffer, head->list.prev);
837 }
838
839 static void rb_list_head_clear(struct list_head *list)
840 {
841 unsigned long *ptr = (unsigned long *)&list->next;
842
843 *ptr &= ~RB_FLAG_MASK;
844 }
845
846 /*
847 * rb_head_page_deactivate - clears head page ptr (for free list)
848 */
849 static void
850 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
851 {
852 struct list_head *hd;
853
854 /* Go through the whole list and clear any pointers found. */
855 rb_list_head_clear(cpu_buffer->pages);
856
857 list_for_each(hd, cpu_buffer->pages)
858 rb_list_head_clear(hd);
859 }
860
861 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
862 struct buffer_page *head,
863 struct buffer_page *prev,
864 int old_flag, int new_flag)
865 {
866 struct list_head *list;
867 unsigned long val = (unsigned long)&head->list;
868 unsigned long ret;
869
870 list = &prev->list;
871
872 val &= ~RB_FLAG_MASK;
873
874 ret = cmpxchg((unsigned long *)&list->next,
875 val | old_flag, val | new_flag);
876
877 /* check if the reader took the page */
878 if ((ret & ~RB_FLAG_MASK) != val)
879 return RB_PAGE_MOVED;
880
881 return ret & RB_FLAG_MASK;
882 }
883
884 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
885 struct buffer_page *head,
886 struct buffer_page *prev,
887 int old_flag)
888 {
889 return rb_head_page_set(cpu_buffer, head, prev,
890 old_flag, RB_PAGE_UPDATE);
891 }
892
893 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
894 struct buffer_page *head,
895 struct buffer_page *prev,
896 int old_flag)
897 {
898 return rb_head_page_set(cpu_buffer, head, prev,
899 old_flag, RB_PAGE_HEAD);
900 }
901
902 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
903 struct buffer_page *head,
904 struct buffer_page *prev,
905 int old_flag)
906 {
907 return rb_head_page_set(cpu_buffer, head, prev,
908 old_flag, RB_PAGE_NORMAL);
909 }
910
911 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
912 struct buffer_page **bpage)
913 {
914 struct list_head *p = rb_list_head((*bpage)->list.next);
915
916 *bpage = list_entry(p, struct buffer_page, list);
917 }
918
919 static struct buffer_page *
920 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
921 {
922 struct buffer_page *head;
923 struct buffer_page *page;
924 struct list_head *list;
925 int i;
926
927 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
928 return NULL;
929
930 /* sanity check */
931 list = cpu_buffer->pages;
932 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
933 return NULL;
934
935 page = head = cpu_buffer->head_page;
936 /*
937 * It is possible that the writer moves the head page behind
938 * where we started, and we miss it in one loop.
939 * A second loop should grab the head page, but we'll do
940 * three loops just because I'm paranoid.
941 */
942 for (i = 0; i < 3; i++) {
943 do {
944 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
945 cpu_buffer->head_page = page;
946 return page;
947 }
948 rb_inc_page(cpu_buffer, &page);
949 } while (page != head);
950 }
951
952 RB_WARN_ON(cpu_buffer, 1);
953
954 return NULL;
955 }
956
957 static int rb_head_page_replace(struct buffer_page *old,
958 struct buffer_page *new)
959 {
960 unsigned long *ptr = (unsigned long *)&old->list.prev->next;
961 unsigned long val;
962 unsigned long ret;
963
964 val = *ptr & ~RB_FLAG_MASK;
965 val |= RB_PAGE_HEAD;
966
967 ret = cmpxchg(ptr, val, (unsigned long)&new->list);
968
969 return ret == val;
970 }
971
972 /*
973 * rb_tail_page_update - move the tail page forward
974 *
975 * Returns 1 if moved tail page, 0 if someone else did.
976 */
977 static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
978 struct buffer_page *tail_page,
979 struct buffer_page *next_page)
980 {
981 struct buffer_page *old_tail;
982 unsigned long old_entries;
983 unsigned long old_write;
984 int ret = 0;
985
986 /*
987 * The tail page now needs to be moved forward.
988 *
989 * We need to reset the tail page, but without messing
990 * with possible erasing of data brought in by interrupts
991 * that have moved the tail page and are currently on it.
992 *
993 * We add a counter to the write field to denote this.
994 */
995 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
996 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
997
998 /*
999 * Just make sure we have seen our old_write and synchronize
1000 * with any interrupts that come in.
1001 */
1002 barrier();
1003
1004 /*
1005 * If the tail page is still the same as what we think
1006 * it is, then it is up to us to update the tail
1007 * pointer.
1008 */
1009 if (tail_page == cpu_buffer->tail_page) {
1010 /* Zero the write counter */
1011 unsigned long val = old_write & ~RB_WRITE_MASK;
1012 unsigned long eval = old_entries & ~RB_WRITE_MASK;
1013
1014 /*
1015 * This will only succeed if an interrupt did
1016 * not come in and change it. In which case, we
1017 * do not want to modify it.
1018 *
1019 * We add (void) to let the compiler know that we do not care
1020 * about the return value of these functions. We use the
1021 * cmpxchg to only update if an interrupt did not already
1022 * do it for us. If the cmpxchg fails, we don't care.
1023 */
1024 (void)local_cmpxchg(&next_page->write, old_write, val);
1025 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
1026
1027 /*
1028 * No need to worry about races with clearing out the commit.
1029 * It can only increment when a commit takes place. But that
1030 * only happens in the outermost nested commit.
1031 */
1032 local_set(&next_page->page->commit, 0);
1033
1034 old_tail = cmpxchg(&cpu_buffer->tail_page,
1035 tail_page, next_page);
1036
1037 if (old_tail == tail_page)
1038 ret = 1;
1039 }
1040
1041 return ret;
1042 }
1043
1044 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1045 struct buffer_page *bpage)
1046 {
1047 unsigned long val = (unsigned long)bpage;
1048
1049 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1050 return 1;
1051
1052 return 0;
1053 }
1054
1055 /**
1056 * rb_check_list - make sure a pointer to a list has the last bits zero
1057 */
1058 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1059 struct list_head *list)
1060 {
1061 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1062 return 1;
1063 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1064 return 1;
1065 return 0;
1066 }
1067
1068 /**
1069 * rb_check_pages - integrity check of buffer pages
1070 * @cpu_buffer: CPU buffer with pages to test
1071 *
1072 * As a safety measure we check to make sure the data pages have not
1073 * been corrupted.
1074 */
1075 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1076 {
1077 struct list_head *head = cpu_buffer->pages;
1078 struct buffer_page *bpage, *tmp;
1079
1080 /* Reset the head page if it exists */
1081 if (cpu_buffer->head_page)
1082 rb_set_head_page(cpu_buffer);
1083
1084 rb_head_page_deactivate(cpu_buffer);
1085
1086 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1087 return -1;
1088 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
1089 return -1;
1090
1091 if (rb_check_list(cpu_buffer, head))
1092 return -1;
1093
1094 list_for_each_entry_safe(bpage, tmp, head, list) {
1095 if (RB_WARN_ON(cpu_buffer,
1096 bpage->list.next->prev != &bpage->list))
1097 return -1;
1098 if (RB_WARN_ON(cpu_buffer,
1099 bpage->list.prev->next != &bpage->list))
1100 return -1;
1101 if (rb_check_list(cpu_buffer, &bpage->list))
1102 return -1;
1103 }
1104
1105 rb_head_page_activate(cpu_buffer);
1106
1107 return 0;
1108 }
1109
1110 static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
1111 {
1112 int i;
1113 struct buffer_page *bpage, *tmp;
1114
1115 for (i = 0; i < nr_pages; i++) {
1116 struct page *page;
1117 /*
1118 * The __GFP_NORETRY flag makes sure that the allocation fails
1119 * gracefully without invoking the OOM killer, so the system is
1120 * not destabilized.
1121 */
1122 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1123 GFP_KERNEL | __GFP_NORETRY,
1124 cpu_to_node(cpu));
1125 if (!bpage)
1126 goto free_pages;
1127
1128 list_add(&bpage->list, pages);
1129
1130 page = alloc_pages_node(cpu_to_node(cpu),
1131 GFP_KERNEL | __GFP_NORETRY, 0);
1132 if (!page)
1133 goto free_pages;
1134 bpage->page = page_address(page);
1135 rb_init_page(bpage->page);
1136 }
1137
1138 return 0;
1139
1140 free_pages:
1141 list_for_each_entry_safe(bpage, tmp, pages, list) {
1142 list_del_init(&bpage->list);
1143 free_buffer_page(bpage);
1144 }
1145
1146 return -ENOMEM;
1147 }
1148
1149 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1150 unsigned nr_pages)
1151 {
1152 LIST_HEAD(pages);
1153
1154 WARN_ON(!nr_pages);
1155
1156 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1157 return -ENOMEM;
1158
1159 /*
1160 * The ring buffer page list is a circular list that does not
1161 * start and end with a list head. All page list items point to
1162 * other pages.
1163 */
1164 cpu_buffer->pages = pages.next;
1165 list_del(&pages);
1166
1167 cpu_buffer->nr_pages = nr_pages;
1168
1169 rb_check_pages(cpu_buffer);
1170
1171 return 0;
1172 }
1173
1174 static struct ring_buffer_per_cpu *
1175 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
1176 {
1177 struct ring_buffer_per_cpu *cpu_buffer;
1178 struct buffer_page *bpage;
1179 struct page *page;
1180 int ret;
1181
1182 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1183 GFP_KERNEL, cpu_to_node(cpu));
1184 if (!cpu_buffer)
1185 return NULL;
1186
1187 cpu_buffer->cpu = cpu;
1188 cpu_buffer->buffer = buffer;
1189 raw_spin_lock_init(&cpu_buffer->reader_lock);
1190 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1191 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1192 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1193 init_completion(&cpu_buffer->update_done);
1194 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1195 init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1196
1197 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1198 GFP_KERNEL, cpu_to_node(cpu));
1199 if (!bpage)
1200 goto fail_free_buffer;
1201
1202 rb_check_bpage(cpu_buffer, bpage);
1203
1204 cpu_buffer->reader_page = bpage;
1205 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1206 if (!page)
1207 goto fail_free_reader;
1208 bpage->page = page_address(page);
1209 rb_init_page(bpage->page);
1210
1211 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1212 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1213
1214 ret = rb_allocate_pages(cpu_buffer, nr_pages);
1215 if (ret < 0)
1216 goto fail_free_reader;
1217
1218 cpu_buffer->head_page
1219 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1220 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1221
1222 rb_head_page_activate(cpu_buffer);
1223
1224 return cpu_buffer;
1225
1226 fail_free_reader:
1227 free_buffer_page(cpu_buffer->reader_page);
1228
1229 fail_free_buffer:
1230 kfree(cpu_buffer);
1231 return NULL;
1232 }
1233
1234 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1235 {
1236 struct list_head *head = cpu_buffer->pages;
1237 struct buffer_page *bpage, *tmp;
1238
1239 free_buffer_page(cpu_buffer->reader_page);
1240
1241 rb_head_page_deactivate(cpu_buffer);
1242
1243 if (head) {
1244 list_for_each_entry_safe(bpage, tmp, head, list) {
1245 list_del_init(&bpage->list);
1246 free_buffer_page(bpage);
1247 }
1248 bpage = list_entry(head, struct buffer_page, list);
1249 free_buffer_page(bpage);
1250 }
1251
1252 kfree(cpu_buffer);
1253 }
1254
1255 #ifdef CONFIG_HOTPLUG_CPU
1256 static int rb_cpu_notify(struct notifier_block *self,
1257 unsigned long action, void *hcpu);
1258 #endif
1259
1260 /**
1261 * __ring_buffer_alloc - allocate a new ring_buffer
1262 * @size: the size in bytes per cpu that is needed.
1263 * @flags: attributes to set for the ring buffer.
1264 *
1265 * Currently the only flag that is available is the RB_FL_OVERWRITE
1266 * flag. This flag means that the buffer will overwrite old data
1267 * when the buffer wraps. If this flag is not set, the buffer will
1268 * drop data when the tail hits the head.
1269 */
1270 struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1271 struct lock_class_key *key)
1272 {
1273 struct ring_buffer *buffer;
1274 int bsize;
1275 int cpu, nr_pages;
1276
1277 /* keep it in its own cache line */
1278 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1279 GFP_KERNEL);
1280 if (!buffer)
1281 return NULL;
1282
1283 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1284 goto fail_free_buffer;
1285
1286 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1287 buffer->flags = flags;
1288 buffer->clock = trace_clock_local;
1289 buffer->reader_lock_key = key;
1290
1291 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1292 init_waitqueue_head(&buffer->irq_work.waiters);
1293
1294 /* need at least two pages */
1295 if (nr_pages < 2)
1296 nr_pages = 2;
1297
1298 /*
1299 * In the case of a non-hotplug CPU, if the ring buffer is allocated
1300 * in an early initcall, it will not be notified of secondary CPUs.
1301 * In that case, we need to allocate for all possible CPUs.
1302 */
1303 #ifdef CONFIG_HOTPLUG_CPU
1304 get_online_cpus();
1305 cpumask_copy(buffer->cpumask, cpu_online_mask);
1306 #else
1307 cpumask_copy(buffer->cpumask, cpu_possible_mask);
1308 #endif
1309 buffer->cpus = nr_cpu_ids;
1310
1311 bsize = sizeof(void *) * nr_cpu_ids;
1312 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1313 GFP_KERNEL);
1314 if (!buffer->buffers)
1315 goto fail_free_cpumask;
1316
1317 for_each_buffer_cpu(buffer, cpu) {
1318 buffer->buffers[cpu] =
1319 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1320 if (!buffer->buffers[cpu])
1321 goto fail_free_buffers;
1322 }
1323
1324 #ifdef CONFIG_HOTPLUG_CPU
1325 buffer->cpu_notify.notifier_call = rb_cpu_notify;
1326 buffer->cpu_notify.priority = 0;
1327 register_cpu_notifier(&buffer->cpu_notify);
1328 #endif
1329
1330 put_online_cpus();
1331 mutex_init(&buffer->mutex);
1332
1333 return buffer;
1334
1335 fail_free_buffers:
1336 for_each_buffer_cpu(buffer, cpu) {
1337 if (buffer->buffers[cpu])
1338 rb_free_cpu_buffer(buffer->buffers[cpu]);
1339 }
1340 kfree(buffer->buffers);
1341
1342 fail_free_cpumask:
1343 free_cpumask_var(buffer->cpumask);
1344 put_online_cpus();
1345
1346 fail_free_buffer:
1347 kfree(buffer);
1348 return NULL;
1349 }
1350 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
1351
1352 /**
1353 * ring_buffer_free - free a ring buffer.
1354 * @buffer: the buffer to free.
1355 */
1356 void
1357 ring_buffer_free(struct ring_buffer *buffer)
1358 {
1359 int cpu;
1360
1361 get_online_cpus();
1362
1363 #ifdef CONFIG_HOTPLUG_CPU
1364 unregister_cpu_notifier(&buffer->cpu_notify);
1365 #endif
1366
1367 for_each_buffer_cpu(buffer, cpu)
1368 rb_free_cpu_buffer(buffer->buffers[cpu]);
1369
1370 put_online_cpus();
1371
1372 kfree(buffer->buffers);
1373 free_cpumask_var(buffer->cpumask);
1374
1375 kfree(buffer);
1376 }
1377 EXPORT_SYMBOL_GPL(ring_buffer_free);
1378
1379 void ring_buffer_set_clock(struct ring_buffer *buffer,
1380 u64 (*clock)(void))
1381 {
1382 buffer->clock = clock;
1383 }
1384
1385 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1386
1387 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1388 {
1389 return local_read(&bpage->entries) & RB_WRITE_MASK;
1390 }
1391
1392 static inline unsigned long rb_page_write(struct buffer_page *bpage)
1393 {
1394 return local_read(&bpage->write) & RB_WRITE_MASK;
1395 }
1396
1397 static int
1398 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
1399 {
1400 struct list_head *tail_page, *to_remove, *next_page;
1401 struct buffer_page *to_remove_page, *tmp_iter_page;
1402 struct buffer_page *last_page, *first_page;
1403 unsigned int nr_removed;
1404 unsigned long head_bit;
1405 int page_entries;
1406
1407 head_bit = 0;
1408
1409 raw_spin_lock_irq(&cpu_buffer->reader_lock);
1410 atomic_inc(&cpu_buffer->record_disabled);
1411 /*
1412 * We don't race with the readers since we have acquired the reader
1413 * lock. We also don't race with writers after disabling recording.
1414 * This makes it easy to figure out the first and the last page to be
1415 * removed from the list. We unlink all the pages in between including
1416 * the first and last pages. This is done in a busy loop so that we
1417 * lose the least number of traces.
1418 * The pages are freed after we restart recording and unlock readers.
1419 */
1420 tail_page = &cpu_buffer->tail_page->list;
1421
1422 /*
1423 * tail page might be on reader page, we remove the next page
1424 * from the ring buffer
1425 */
1426 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1427 tail_page = rb_list_head(tail_page->next);
1428 to_remove = tail_page;
1429
1430 /* start of pages to remove */
1431 first_page = list_entry(rb_list_head(to_remove->next),
1432 struct buffer_page, list);
1433
1434 for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1435 to_remove = rb_list_head(to_remove)->next;
1436 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1437 }
1438
1439 next_page = rb_list_head(to_remove)->next;
1440
1441 /*
1442 * Now we remove all pages between tail_page and next_page.
1443 * Make sure that we have head_bit value preserved for the
1444 * next page
1445 */
1446 tail_page->next = (struct list_head *)((unsigned long)next_page |
1447 head_bit);
1448 next_page = rb_list_head(next_page);
1449 next_page->prev = tail_page;
1450
1451 /* make sure pages points to a valid page in the ring buffer */
1452 cpu_buffer->pages = next_page;
1453
1454 /* update head page */
1455 if (head_bit)
1456 cpu_buffer->head_page = list_entry(next_page,
1457 struct buffer_page, list);
1458
1459 /*
1460 * change read pointer to make sure any read iterators reset
1461 * themselves
1462 */
1463 cpu_buffer->read = 0;
1464
1465 /* pages are removed, resume tracing and then free the pages */
1466 atomic_dec(&cpu_buffer->record_disabled);
1467 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1468
1469 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1470
1471 /* last buffer page to remove */
1472 last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1473 list);
1474 tmp_iter_page = first_page;
1475
1476 do {
1477 to_remove_page = tmp_iter_page;
1478 rb_inc_page(cpu_buffer, &tmp_iter_page);
1479
1480 /* update the counters */
1481 page_entries = rb_page_entries(to_remove_page);
1482 if (page_entries) {
1483 /*
1484 * If something was added to this page, it was full
1485 * since it is not the tail page. So we deduct the
1486 * bytes consumed in ring buffer from here.
1487 * Increment overrun to account for the lost events.
1488 */
1489 local_add(page_entries, &cpu_buffer->overrun);
1490 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1491 }
1492
1493 /*
1494 * We have already removed references to this list item, just
1495 * free up the buffer_page and its page
1496 */
1497 free_buffer_page(to_remove_page);
1498 nr_removed--;
1499
1500 } while (to_remove_page != last_page);
1501
1502 RB_WARN_ON(cpu_buffer, nr_removed);
1503
1504 return nr_removed == 0;
1505 }
1506
1507 static int
1508 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1509 {
1510 struct list_head *pages = &cpu_buffer->new_pages;
1511 int retries, success;
1512
1513 raw_spin_lock_irq(&cpu_buffer->reader_lock);
1514 /*
1515 * We are holding the reader lock, so the reader page won't be swapped
1516 * in the ring buffer. Now we are racing with the writer trying to
1517 * move head page and the tail page.
1518 * We are going to adapt the reader page update process where:
1519 * 1. We first splice the start and end of list of new pages between
1520 * the head page and its previous page.
1521 * 2. We cmpxchg the prev_page->next to point from head page to the
1522 * start of new pages list.
1523 * 3. Finally, we update the head->prev to the end of new list.
1524 *
1525 * We will try this process 10 times, to make sure that we don't keep
1526 * spinning.
1527 */
1528 retries = 10;
1529 success = 0;
1530 while (retries--) {
1531 struct list_head *head_page, *prev_page, *r;
1532 struct list_head *last_page, *first_page;
1533 struct list_head *head_page_with_bit;
1534
1535 head_page = &rb_set_head_page(cpu_buffer)->list;
1536 if (!head_page)
1537 break;
1538 prev_page = head_page->prev;
1539
1540 first_page = pages->next;
1541 last_page = pages->prev;
1542
1543 head_page_with_bit = (struct list_head *)
1544 ((unsigned long)head_page | RB_PAGE_HEAD);
1545
1546 last_page->next = head_page_with_bit;
1547 first_page->prev = prev_page;
1548
1549 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1550
1551 if (r == head_page_with_bit) {
1552 /*
1553 * yay, we replaced the page pointer to our new list,
1554 * now we just have to update the head page's prev
1555 * pointer to point to the end of the list
1556 */
1557 head_page->prev = last_page;
1558 success = 1;
1559 break;
1560 }
1561 }
1562
1563 if (success)
1564 INIT_LIST_HEAD(pages);
1565 /*
1566 * If we weren't successful in adding in new pages, warn and stop
1567 * tracing
1568 */
1569 RB_WARN_ON(cpu_buffer, !success);
1570 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1571
1572 /* free pages if they weren't inserted */
1573 if (!success) {
1574 struct buffer_page *bpage, *tmp;
1575 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1576 list) {
1577 list_del_init(&bpage->list);
1578 free_buffer_page(bpage);
1579 }
1580 }
1581 return success;
1582 }
1583
1584 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1585 {
1586 int success;
1587
1588 if (cpu_buffer->nr_pages_to_update > 0)
1589 success = rb_insert_pages(cpu_buffer);
1590 else
1591 success = rb_remove_pages(cpu_buffer,
1592 -cpu_buffer->nr_pages_to_update);
1593
1594 if (success)
1595 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1596 }
1597
1598 static void update_pages_handler(struct work_struct *work)
1599 {
1600 struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1601 struct ring_buffer_per_cpu, update_pages_work);
1602 rb_update_pages(cpu_buffer);
1603 complete(&cpu_buffer->update_done);
1604 }
1605
1606 /**
1607 * ring_buffer_resize - resize the ring buffer
1608 * @buffer: the buffer to resize.
1609 * @size: the new size.
1610 * @cpu_id: the cpu buffer to resize
1611 *
1612 * Minimum size is 2 * BUF_PAGE_SIZE.
1613 *
1614 * Returns 0 on success and < 0 on failure.
1615 */
1616 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1617 int cpu_id)
1618 {
1619 struct ring_buffer_per_cpu *cpu_buffer;
1620 unsigned nr_pages;
1621 int cpu, err = 0;
1622
1623 /*
1624 * Always succeed at resizing a non-existent buffer:
1625 */
1626 if (!buffer)
1627 return size;
1628
1629 /* Make sure the requested buffer exists */
1630 if (cpu_id != RING_BUFFER_ALL_CPUS &&
1631 !cpumask_test_cpu(cpu_id, buffer->cpumask))
1632 return size;
1633
1634 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1635 size *= BUF_PAGE_SIZE;
1636
1637 /* we need a minimum of two pages */
1638 if (size < BUF_PAGE_SIZE * 2)
1639 size = BUF_PAGE_SIZE * 2;
1640
1641 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1642
1643 /*
1644 * Don't succeed if resizing is disabled, as a reader might be
1645 * manipulating the ring buffer and is expecting a sane state while
1646 * this is true.
1647 */
1648 if (atomic_read(&buffer->resize_disabled))
1649 return -EBUSY;
1650
1651 /* prevent another thread from changing buffer sizes */
1652 mutex_lock(&buffer->mutex);
1653
1654 if (cpu_id == RING_BUFFER_ALL_CPUS) {
1655 /* calculate the pages to update */
1656 for_each_buffer_cpu(buffer, cpu) {
1657 cpu_buffer = buffer->buffers[cpu];
1658
1659 cpu_buffer->nr_pages_to_update = nr_pages -
1660 cpu_buffer->nr_pages;
1661 /*
1662 * Nothing more to do when removing pages or when there is no update
1663 */
1664 if (cpu_buffer->nr_pages_to_update <= 0)
1665 continue;
1666 /*
1667 * to add pages, make sure all new pages can be
1668 * allocated without receiving ENOMEM
1669 */
1670 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1671 if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1672 &cpu_buffer->new_pages, cpu)) {
1673 /* not enough memory for new pages */
1674 err = -ENOMEM;
1675 goto out_err;
1676 }
1677 }
1678
1679 get_online_cpus();
1680 /*
1681 * Fire off all the required work handlers
1682 * We can't schedule on offline CPUs, but it's not necessary
1683 * since we can change their buffer sizes without any race.
1684 */
1685 for_each_buffer_cpu(buffer, cpu) {
1686 cpu_buffer = buffer->buffers[cpu];
1687 if (!cpu_buffer->nr_pages_to_update)
1688 continue;
1689
1690 /* The update must run on the CPU that is being updated. */
1691 preempt_disable();
1692 if (cpu == smp_processor_id() || !cpu_online(cpu)) {
1693 rb_update_pages(cpu_buffer);
1694 cpu_buffer->nr_pages_to_update = 0;
1695 } else {
1696 /*
1697 * Can not disable preemption for schedule_work_on()
1698 * on PREEMPT_RT.
1699 */
1700 preempt_enable();
1701 schedule_work_on(cpu,
1702 &cpu_buffer->update_pages_work);
1703 preempt_disable();
1704 }
1705 preempt_enable();
1706 }
1707
1708 /* wait for all the updates to complete */
1709 for_each_buffer_cpu(buffer, cpu) {
1710 cpu_buffer = buffer->buffers[cpu];
1711 if (!cpu_buffer->nr_pages_to_update)
1712 continue;
1713
1714 if (cpu_online(cpu))
1715 wait_for_completion(&cpu_buffer->update_done);
1716 cpu_buffer->nr_pages_to_update = 0;
1717 }
1718
1719 put_online_cpus();
1720 } else {
1721 /* Make sure this CPU has been initialized */
1722 if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1723 goto out;
1724
1725 cpu_buffer = buffer->buffers[cpu_id];
1726
1727 if (nr_pages == cpu_buffer->nr_pages)
1728 goto out;
1729
1730 cpu_buffer->nr_pages_to_update = nr_pages -
1731 cpu_buffer->nr_pages;
1732
1733 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1734 if (cpu_buffer->nr_pages_to_update > 0 &&
1735 __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1736 &cpu_buffer->new_pages, cpu_id)) {
1737 err = -ENOMEM;
1738 goto out_err;
1739 }
1740
1741 get_online_cpus();
1742
1743 preempt_disable();
1744 /* The update must run on the CPU that is being updated. */
1745 if (cpu_id == smp_processor_id() || !cpu_online(cpu_id))
1746 rb_update_pages(cpu_buffer);
1747 else {
1748 /*
1749 * Can not disable preemption for schedule_work_on()
1750 * on PREEMPT_RT.
1751 */
1752 preempt_enable();
1753 schedule_work_on(cpu_id,
1754 &cpu_buffer->update_pages_work);
1755 wait_for_completion(&cpu_buffer->update_done);
1756 preempt_disable();
1757 }
1758 preempt_enable();
1759
1760 cpu_buffer->nr_pages_to_update = 0;
1761 put_online_cpus();
1762 }
1763
1764 out:
1765 /*
1766 * The ring buffer resize can happen with the ring buffer
1767 * enabled, so that the update disturbs the tracing as little
1768 * as possible. But if the buffer is disabled, we do not need
1769 * to worry about that, and we can take the time to verify
1770 * that the buffer is not corrupt.
1771 */
1772 if (atomic_read(&buffer->record_disabled)) {
1773 atomic_inc(&buffer->record_disabled);
1774 /*
1775 * Even though the buffer was disabled, we must make sure
1776 * that it is truly disabled before calling rb_check_pages.
1777 * There could have been a race between checking
1778 * record_disable and incrementing it.
1779 */
1780 synchronize_sched();
1781 for_each_buffer_cpu(buffer, cpu) {
1782 cpu_buffer = buffer->buffers[cpu];
1783 rb_check_pages(cpu_buffer);
1784 }
1785 atomic_dec(&buffer->record_disabled);
1786 }
1787
1788 mutex_unlock(&buffer->mutex);
1789 return size;
1790
1791 out_err:
1792 for_each_buffer_cpu(buffer, cpu) {
1793 struct buffer_page *bpage, *tmp;
1794
1795 cpu_buffer = buffer->buffers[cpu];
1796 cpu_buffer->nr_pages_to_update = 0;
1797
1798 if (list_empty(&cpu_buffer->new_pages))
1799 continue;
1800
1801 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1802 list) {
1803 list_del_init(&bpage->list);
1804 free_buffer_page(bpage);
1805 }
1806 }
1807 mutex_unlock(&buffer->mutex);
1808 return err;
1809 }
1810 EXPORT_SYMBOL_GPL(ring_buffer_resize);
1811
1812 void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1813 {
1814 mutex_lock(&buffer->mutex);
1815 if (val)
1816 buffer->flags |= RB_FL_OVERWRITE;
1817 else
1818 buffer->flags &= ~RB_FL_OVERWRITE;
1819 mutex_unlock(&buffer->mutex);
1820 }
1821 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1822
1823 static inline void *
1824 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1825 {
1826 return bpage->data + index;
1827 }
1828
1829 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1830 {
1831 return bpage->page->data + index;
1832 }
1833
1834 static inline struct ring_buffer_event *
1835 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1836 {
1837 return __rb_page_index(cpu_buffer->reader_page,
1838 cpu_buffer->reader_page->read);
1839 }
1840
1841 static inline struct ring_buffer_event *
1842 rb_iter_head_event(struct ring_buffer_iter *iter)
1843 {
1844 return __rb_page_index(iter->head_page, iter->head);
1845 }
1846
1847 static inline unsigned rb_page_commit(struct buffer_page *bpage)
1848 {
1849 return local_read(&bpage->page->commit);
1850 }
1851
1852 /* Size is determined by what has been committed */
1853 static inline unsigned rb_page_size(struct buffer_page *bpage)
1854 {
1855 return rb_page_commit(bpage);
1856 }
1857
1858 static inline unsigned
1859 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1860 {
1861 return rb_page_commit(cpu_buffer->commit_page);
1862 }
1863
1864 static inline unsigned
1865 rb_event_index(struct ring_buffer_event *event)
1866 {
1867 unsigned long addr = (unsigned long)event;
1868
1869 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1870 }
1871
1872 static inline int
1873 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1874 struct ring_buffer_event *event)
1875 {
1876 unsigned long addr = (unsigned long)event;
1877 unsigned long index;
1878
1879 index = rb_event_index(event);
1880 addr &= PAGE_MASK;
1881
1882 return cpu_buffer->commit_page->page == (void *)addr &&
1883 rb_commit_index(cpu_buffer) == index;
1884 }
1885
1886 static void
1887 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1888 {
1889 unsigned long max_count;
1890
1891 /*
1892 * We only race with interrupts and NMIs on this CPU.
1893 * If we own the commit event, then we can commit
1894 * all others that interrupted us, since the interruptions
1895 * are in stack format (they finish before they come
1896 * back to us). This allows us to do a simple loop to
1897 * assign the commit to the tail.
1898 */
1899 again:
1900 max_count = cpu_buffer->nr_pages * 100;
1901
1902 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1903 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1904 return;
1905 if (RB_WARN_ON(cpu_buffer,
1906 rb_is_reader_page(cpu_buffer->tail_page)))
1907 return;
1908 local_set(&cpu_buffer->commit_page->page->commit,
1909 rb_page_write(cpu_buffer->commit_page));
1910 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1911 cpu_buffer->write_stamp =
1912 cpu_buffer->commit_page->page->time_stamp;
1913 /* add barrier to keep gcc from optimizing too much */
1914 barrier();
1915 }
1916 while (rb_commit_index(cpu_buffer) !=
1917 rb_page_write(cpu_buffer->commit_page)) {
1918
1919 local_set(&cpu_buffer->commit_page->page->commit,
1920 rb_page_write(cpu_buffer->commit_page));
1921 RB_WARN_ON(cpu_buffer,
1922 local_read(&cpu_buffer->commit_page->page->commit) &
1923 ~RB_WRITE_MASK);
1924 barrier();
1925 }
1926
1927 /* again, keep gcc from optimizing */
1928 barrier();
1929
1930 /*
1931 * If an interrupt came in just after the first while loop
1932 * and pushed the tail page forward, we will be left with
1933 * a dangling commit that will never go forward.
1934 */
1935 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1936 goto again;
1937 }
1938
1939 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1940 {
1941 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1942 cpu_buffer->reader_page->read = 0;
1943 }
1944
1945 static void rb_inc_iter(struct ring_buffer_iter *iter)
1946 {
1947 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1948
1949 /*
1950 * The iterator could be on the reader page (it starts there).
1951 * But the head could have moved, since the reader was
1952 * found. Check for this case and assign the iterator
1953 * to the head page instead of next.
1954 */
1955 if (iter->head_page == cpu_buffer->reader_page)
1956 iter->head_page = rb_set_head_page(cpu_buffer);
1957 else
1958 rb_inc_page(cpu_buffer, &iter->head_page);
1959
1960 iter->read_stamp = iter->head_page->page->time_stamp;
1961 iter->head = 0;
1962 }
1963
1964 /* Slow path, do not inline */
1965 static noinline struct ring_buffer_event *
1966 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
1967 {
1968 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
1969
1970 /* Not the first event on the page? */
1971 if (rb_event_index(event)) {
1972 event->time_delta = delta & TS_MASK;
1973 event->array[0] = delta >> TS_SHIFT;
1974 } else {
1975 /* nope, just zero it */
1976 event->time_delta = 0;
1977 event->array[0] = 0;
1978 }
1979
1980 return skip_time_extend(event);
1981 }
1982
1983 /**
1984 * rb_update_event - update event type and data
1985 * @event: the event to update
1986 * @type: the type of event
1987 * @length: the size of the event field in the ring buffer
1988 *
1989 * Update the type and data fields of the event. The length
1990 * is the actual size that is written to the ring buffer,
1991 * and with this, we can determine what to place into the
1992 * data field.
1993 */
1994 static void
1995 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
1996 struct ring_buffer_event *event, unsigned length,
1997 int add_timestamp, u64 delta)
1998 {
1999 /* Only a commit updates the timestamp */
2000 if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
2001 delta = 0;
2002
2003 /*
2004 * If we need to add a timestamp, then we
2005 * add it to the start of the reserved space.
2006 */
2007 if (unlikely(add_timestamp)) {
2008 event = rb_add_time_stamp(event, delta);
2009 length -= RB_LEN_TIME_EXTEND;
2010 delta = 0;
2011 }
2012
2013 event->time_delta = delta;
2014 length -= RB_EVNT_HDR_SIZE;
2015 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2016 event->type_len = 0;
2017 event->array[0] = length;
2018 } else
2019 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2020 }
2021
2022 /*
2023 * rb_handle_head_page - writer hit the head page
2024 *
2025 * Returns: +1 to retry page
2026 * 0 to continue
2027 * -1 on error
2028 */
2029 static int
2030 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2031 struct buffer_page *tail_page,
2032 struct buffer_page *next_page)
2033 {
2034 struct buffer_page *new_head;
2035 int entries;
2036 int type;
2037 int ret;
2038
2039 entries = rb_page_entries(next_page);
2040
2041 /*
2042 * The hard part is here. We need to move the head
2043 * forward, and protect against both readers on
2044 * other CPUs and writers coming in via interrupts.
2045 */
2046 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2047 RB_PAGE_HEAD);
2048
2049 /*
2050 * type can be one of four:
2051 * NORMAL - an interrupt already moved it for us
2052 * HEAD - we are the first to get here.
2053 * UPDATE - we are the interrupt interrupting
2054 * a current move.
2055 * MOVED - a reader on another CPU moved the next
2056 * pointer to its reader page. Give up
2057 * and try again.
2058 */
2059
2060 switch (type) {
2061 case RB_PAGE_HEAD:
2062 /*
2063 * We changed the head to UPDATE, thus
2064 * it is our responsibility to update
2065 * the counters.
2066 */
2067 local_add(entries, &cpu_buffer->overrun);
2068 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2069
2070 /*
2071 * The entries will be zeroed out when we move the
2072 * tail page.
2073 */
2074
2075 /* still more to do */
2076 break;
2077
2078 case RB_PAGE_UPDATE:
2079 /*
2080 * This is an interrupt that interrupted the
2081 * previous update. Still more to do.
2082 */
2083 break;
2084 case RB_PAGE_NORMAL:
2085 /*
2086 * An interrupt came in before the update
2087 * and processed this for us.
2088 * Nothing left to do.
2089 */
2090 return 1;
2091 case RB_PAGE_MOVED:
2092 /*
2093 * The reader is on another CPU and just did
2094 * a swap with our next_page.
2095 * Try again.
2096 */
2097 return 1;
2098 default:
2099 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2100 return -1;
2101 }
2102
2103 /*
2104 * Now that we are here, the old head pointer is
2105 * set to UPDATE. This will keep the reader from
2106 * swapping the head page with the reader page.
2107 * The reader (on another CPU) will spin till
2108 * we are finished.
2109 *
2110 * We just need to protect against interrupts
2111 * doing the job. We will set the next pointer
2112 * to HEAD. After that, we set the old pointer
2113 * to NORMAL, but only if it was HEAD before;
2114 * otherwise we are an interrupt, and only
2115 * want the outer most commit to reset it.
2116 */
2117 new_head = next_page;
2118 rb_inc_page(cpu_buffer, &new_head);
2119
2120 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2121 RB_PAGE_NORMAL);
2122
2123 /*
2124 * Valid returns are:
2125 * HEAD - an interrupt came in and already set it.
2126 * NORMAL - One of two things:
2127 * 1) We really set it.
2128 * 2) A bunch of interrupts came in and moved
2129 * the page forward again.
2130 */
2131 switch (ret) {
2132 case RB_PAGE_HEAD:
2133 case RB_PAGE_NORMAL:
2134 /* OK */
2135 break;
2136 default:
2137 RB_WARN_ON(cpu_buffer, 1);
2138 return -1;
2139 }
2140
2141 /*
2142 * It is possible that an interrupt came in,
2143 * set the head up, then more interrupts came in
2144 * and moved it again. When we get back here,
2145 * the page would have been set to NORMAL but we
2146 * just set it back to HEAD.
2147 *
2148 * How do you detect this? Well, if that happened
2149 * the tail page would have moved.
2150 */
2151 if (ret == RB_PAGE_NORMAL) {
2152 /*
2153 * If the tail had moved past next, then we need
2154 * to reset the pointer.
2155 */
2156 if (cpu_buffer->tail_page != tail_page &&
2157 cpu_buffer->tail_page != next_page)
2158 rb_head_page_set_normal(cpu_buffer, new_head,
2159 next_page,
2160 RB_PAGE_HEAD);
2161 }
2162
2163 /*
2164 * If this was the outer most commit (the one that
2165 * changed the original pointer from HEAD to UPDATE),
2166 * then it is up to us to reset it to NORMAL.
2167 */
2168 if (type == RB_PAGE_HEAD) {
2169 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2170 tail_page,
2171 RB_PAGE_UPDATE);
2172 if (RB_WARN_ON(cpu_buffer,
2173 ret != RB_PAGE_UPDATE))
2174 return -1;
2175 }
2176
2177 return 0;
2178 }
2179
2180 static unsigned rb_calculate_event_length(unsigned length)
2181 {
2182 struct ring_buffer_event event; /* Used only for sizeof array */
2183
2184 /* zero length can cause confusion */
2185 if (!length)
2186 length = 1;
2187
2188 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2189 length += sizeof(event.array[0]);
2190
2191 length += RB_EVNT_HDR_SIZE;
2192 length = ALIGN(length, RB_ARCH_ALIGNMENT);
2193
2194 return length;
2195 }
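/*
 * Worked example (illustrative, assuming a 4-byte event header and a
 * 4-byte RB_ARCH_ALIGNMENT): a request to reserve 3 bytes of data stays
 * below RB_MAX_SMALL_DATA, so no extra array slot is added; 3 + 4 = 7
 * bytes, which ALIGN() rounds up to 8 bytes actually taken on the page.
 */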
2196
2197 static inline void
2198 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2199 struct buffer_page *tail_page,
2200 unsigned long tail, unsigned long length)
2201 {
2202 struct ring_buffer_event *event;
2203
2204 /*
2205 * Only the event that crossed the page boundary
2206 * must fill the old tail_page with padding.
2207 */
2208 if (tail >= BUF_PAGE_SIZE) {
2209 /*
2210 * If the page was filled, then we still need
2211 * to update the real_end. Reset it to zero
2212 * and the reader will ignore it.
2213 */
2214 if (tail == BUF_PAGE_SIZE)
2215 tail_page->real_end = 0;
2216
2217 local_sub(length, &tail_page->write);
2218 return;
2219 }
2220
2221 event = __rb_page_index(tail_page, tail);
2222 kmemcheck_annotate_bitfield(event, bitfield);
2223
2224 /* account for padding bytes */
2225 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2226
2227 /*
2228 * Save the original length to the meta data.
2229 * This will be used by the reader to add the lost event
2230 * counter.
2231 */
2232 tail_page->real_end = tail;
2233
2234 /*
2235 * If this event is bigger than the minimum size, then
2236 * we need to be careful that we don't subtract the
2237 * write counter enough to allow another writer to slip
2238 * in on this page.
2239 * We put in a discarded commit instead, to make sure
2240 * that this space is not used again.
2241 *
2242 * If we are less than the minimum size, we don't need to
2243 * worry about it.
2244 */
2245 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2246 /* No room for any events */
2247
2248 /* Mark the rest of the page with padding */
2249 rb_event_set_padding(event);
2250
2251 /* Set the write back to the previous setting */
2252 local_sub(length, &tail_page->write);
2253 return;
2254 }
2255
2256 /* Put in a discarded event */
2257 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2258 event->type_len = RINGBUF_TYPE_PADDING;
2259 /* time delta must be non zero */
2260 event->time_delta = 1;
2261
2262 /* Set write to end of buffer */
2263 length = (tail + length) - BUF_PAGE_SIZE;
2264 local_sub(length, &tail_page->write);
2265 }
2266
2267 /*
2268 * This is the slow path, force gcc not to inline it.
2269 */
2270 static noinline struct ring_buffer_event *
2271 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2272 unsigned long length, unsigned long tail,
2273 struct buffer_page *tail_page, u64 ts)
2274 {
2275 struct buffer_page *commit_page = cpu_buffer->commit_page;
2276 struct ring_buffer *buffer = cpu_buffer->buffer;
2277 struct buffer_page *next_page;
2278 int ret;
2279
2280 next_page = tail_page;
2281
2282 rb_inc_page(cpu_buffer, &next_page);
2283
2284 /*
2285 * If for some reason, we had an interrupt storm that made
2286 * it all the way around the buffer, bail, and warn
2287 * about it.
2288 */
2289 if (unlikely(next_page == commit_page)) {
2290 local_inc(&cpu_buffer->commit_overrun);
2291 goto out_reset;
2292 }
2293
2294 /*
2295 * This is where the fun begins!
2296 *
2297 * We are fighting against races between a reader that
2298 * could be on another CPU trying to swap its reader
2299 * page with the buffer head.
2300 *
2301 * We are also fighting against interrupts coming in and
2302 * moving the head or tail on us as well.
2303 *
2304 * If the next page is the head page then we have filled
2305 * the buffer, unless the commit page is still on the
2306 * reader page.
2307 */
2308 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2309
2310 /*
2311 * If the commit is not on the reader page, then
2312 * move the header page.
2313 */
2314 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2315 /*
2316 * If we are not in overwrite mode,
2317 * this is easy, just stop here.
2318 */
2319 if (!(buffer->flags & RB_FL_OVERWRITE)) {
2320 local_inc(&cpu_buffer->dropped_events);
2321 goto out_reset;
2322 }
2323
2324 ret = rb_handle_head_page(cpu_buffer,
2325 tail_page,
2326 next_page);
2327 if (ret < 0)
2328 goto out_reset;
2329 if (ret)
2330 goto out_again;
2331 } else {
2332 /*
2333 * We need to be careful here too. The
2334 * commit page could still be on the reader
2335 * page. We could have a small buffer, and
2336 * have filled up the buffer with events
2337 * from interrupts and such, and wrapped.
2338 *
2339 * Note, if the tail page is also on the
2340 * reader_page, we let it move out.
2341 */
2342 if (unlikely((cpu_buffer->commit_page !=
2343 cpu_buffer->tail_page) &&
2344 (cpu_buffer->commit_page ==
2345 cpu_buffer->reader_page))) {
2346 local_inc(&cpu_buffer->commit_overrun);
2347 goto out_reset;
2348 }
2349 }
2350 }
2351
2352 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
2353 if (ret) {
2354 /*
2355 * Nested commits always have zero deltas, so
2356 * just reread the time stamp
2357 */
2358 ts = rb_time_stamp(buffer);
2359 next_page->page->time_stamp = ts;
2360 }
2361
2362 out_again:
2363
2364 rb_reset_tail(cpu_buffer, tail_page, tail, length);
2365
2366 /* fail and let the caller try again */
2367 return ERR_PTR(-EAGAIN);
2368
2369 out_reset:
2370 /* reset write */
2371 rb_reset_tail(cpu_buffer, tail_page, tail, length);
2372
2373 return NULL;
2374 }
2375
2376 static struct ring_buffer_event *
2377 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2378 unsigned long length, u64 ts,
2379 u64 delta, int add_timestamp)
2380 {
2381 struct buffer_page *tail_page;
2382 struct ring_buffer_event *event;
2383 unsigned long tail, write;
2384
2385 /*
2386 * If the time delta since the last event is too big to
2387 * hold in the time field of the event, then we append a
2388 * TIME EXTEND event ahead of the data event.
2389 */
2390 if (unlikely(add_timestamp))
2391 length += RB_LEN_TIME_EXTEND;
2392
2393 tail_page = cpu_buffer->tail_page;
2394 write = local_add_return(length, &tail_page->write);
2395
2396 /* set write to only the index of the write */
2397 write &= RB_WRITE_MASK;
2398 tail = write - length;
2399
2400 /*
2401 * If this is the first commit on the page, then it has the same
2402 * timestamp as the page itself.
2403 */
2404 if (!tail)
2405 delta = 0;
2406
2407 /* See if we shot past the end of this buffer page */
2408 if (unlikely(write > BUF_PAGE_SIZE))
2409 return rb_move_tail(cpu_buffer, length, tail,
2410 tail_page, ts);
2411
2412 /* We reserved something on the buffer */
2413
2414 event = __rb_page_index(tail_page, tail);
2415 kmemcheck_annotate_bitfield(event, bitfield);
2416 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
2417
2418 local_inc(&tail_page->entries);
2419
2420 /*
2421 * If this is the first commit on the page, then update
2422 * its timestamp.
2423 */
2424 if (!tail)
2425 tail_page->page->time_stamp = ts;
2426
2427 /* account for these added bytes */
2428 local_add(length, &cpu_buffer->entries_bytes);
2429
2430 return event;
2431 }
2432
2433 static inline int
2434 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2435 struct ring_buffer_event *event)
2436 {
2437 unsigned long new_index, old_index;
2438 struct buffer_page *bpage;
2439 unsigned long index;
2440 unsigned long addr;
2441
2442 new_index = rb_event_index(event);
2443 old_index = new_index + rb_event_ts_length(event);
2444 addr = (unsigned long)event;
2445 addr &= PAGE_MASK;
2446
2447 bpage = cpu_buffer->tail_page;
2448
2449 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2450 unsigned long write_mask =
2451 local_read(&bpage->write) & ~RB_WRITE_MASK;
2452 unsigned long event_length = rb_event_length(event);
2453 /*
2454 * This is on the tail page. It is possible that
2455 * a write could come in and move the tail page
2456 * and write to the next page. That is fine
2457 * because we just shorten what is on this page.
2458 */
2459 old_index += write_mask;
2460 new_index += write_mask;
2461 index = local_cmpxchg(&bpage->write, old_index, new_index);
2462 if (index == old_index) {
2463 /* update counters */
2464 local_sub(event_length, &cpu_buffer->entries_bytes);
2465 return 1;
2466 }
2467 }
2468
2469 /* could not discard */
2470 return 0;
2471 }
2472
2473 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2474 {
2475 local_inc(&cpu_buffer->committing);
2476 local_inc(&cpu_buffer->commits);
2477 }
2478
2479 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2480 {
2481 unsigned long commits;
2482
2483 if (RB_WARN_ON(cpu_buffer,
2484 !local_read(&cpu_buffer->committing)))
2485 return;
2486
2487 again:
2488 commits = local_read(&cpu_buffer->commits);
2489 /* synchronize with interrupts */
2490 barrier();
2491 if (local_read(&cpu_buffer->committing) == 1)
2492 rb_set_commit_to_write(cpu_buffer);
2493
2494 local_dec(&cpu_buffer->committing);
2495
2496 /* synchronize with interrupts */
2497 barrier();
2498
2499 /*
2500 * Need to account for interrupts coming in between the
2501 * updating of the commit page and the clearing of the
2502 * committing counter.
2503 */
2504 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2505 !local_read(&cpu_buffer->committing)) {
2506 local_inc(&cpu_buffer->committing);
2507 goto again;
2508 }
2509 }
2510
2511 static struct ring_buffer_event *
2512 rb_reserve_next_event(struct ring_buffer *buffer,
2513 struct ring_buffer_per_cpu *cpu_buffer,
2514 unsigned long length)
2515 {
2516 struct ring_buffer_event *event;
2517 u64 ts, delta;
2518 int nr_loops = 0;
2519 int add_timestamp;
2520 u64 diff;
2521
2522 rb_start_commit(cpu_buffer);
2523
2524 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2525 /*
2526 * Due to the ability to swap a cpu buffer from a buffer,
2527 * it is possible it was swapped before we committed.
2528 * (committing stops a swap). We check for it here and
2529 * if it happened, we have to fail the write.
2530 */
2531 barrier();
2532 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2533 local_dec(&cpu_buffer->committing);
2534 local_dec(&cpu_buffer->commits);
2535 return NULL;
2536 }
2537 #endif
2538
2539 length = rb_calculate_event_length(length);
2540 again:
2541 add_timestamp = 0;
2542 delta = 0;
2543
2544 /*
2545 * We allow for interrupts to reenter here and do a trace.
2546 * If one does, it will cause this original code to loop
2547 * back here. Even with heavy interrupts happening, this
2548 * should only happen a few times in a row. If this happens
2549 * 1000 times in a row, there must be either an interrupt
2550 * storm or we have something buggy.
2551 * Bail!
2552 */
2553 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2554 goto out_fail;
2555
2556 ts = rb_time_stamp(cpu_buffer->buffer);
2557 diff = ts - cpu_buffer->write_stamp;
2558
2559 /* make sure this diff is calculated here */
2560 barrier();
2561
2562 /* Did the write stamp get updated already? */
2563 if (likely(ts >= cpu_buffer->write_stamp)) {
2564 delta = diff;
2565 if (unlikely(test_time_stamp(delta))) {
2566 int local_clock_stable = 1;
2567 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2568 local_clock_stable = sched_clock_stable();
2569 #endif
2570 WARN_ONCE(delta > (1ULL << 59),
2571 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2572 (unsigned long long)delta,
2573 (unsigned long long)ts,
2574 (unsigned long long)cpu_buffer->write_stamp,
2575 local_clock_stable ? "" :
2576 "If you just came from a suspend/resume,\n"
2577 "please switch to the trace global clock:\n"
2578 " echo global > /sys/kernel/debug/tracing/trace_clock\n");
2579 add_timestamp = 1;
2580 }
2581 }
2582
2583 event = __rb_reserve_next(cpu_buffer, length, ts,
2584 delta, add_timestamp);
2585 if (unlikely(PTR_ERR(event) == -EAGAIN))
2586 goto again;
2587
2588 if (!event)
2589 goto out_fail;
2590
2591 return event;
2592
2593 out_fail:
2594 rb_end_commit(cpu_buffer);
2595 return NULL;
2596 }
2597
2598 #ifdef CONFIG_TRACING
2599
2600 /*
2601 * The lock and unlock are done within a preempt disable section.
2602 * The current_context per_cpu variable can only be modified
2603 * by the current task between lock and unlock. But it can
2604 * be modified more than once via an interrupt. To pass this
2605 * information from the lock to the unlock without having to
2606 * access the 'in_interrupt()' functions again (which do show
2607 * a bit of overhead in something as critical as function tracing),
2608 * we use a bitmask trick.
2609 *
2610 * bit 0 = NMI context
2611 * bit 1 = IRQ context
2612 * bit 2 = SoftIRQ context
2613 * bit 3 = normal context.
2614 *
2615 * This works because this is the order of contexts that can
2616 * preempt other contexts. A SoftIRQ never preempts an IRQ
2617 * context.
2618 *
2619 * When the context is determined, the corresponding bit is
2620 * checked and set (if it was set, then a recursion of that context
2621 * happened).
2622 *
2623 * On unlock, we need to clear this bit. To do so, just subtract
2624 * 1 from the current_context and AND it to itself.
2625 *
2626 * (binary)
2627 * 101 - 1 = 100
2628 * 101 & 100 = 100 (clearing bit zero)
2629 *
2630 * 1010 - 1 = 1001
2631 * 1010 & 1001 = 1000 (clearing bit 1)
2632 *
2633 * The least significant bit can be cleared this way, and it
2634 * just so happens that it is the same bit corresponding to
2635 * the current context.
2636 */
2637 static DEFINE_PER_CPU(unsigned int, current_context);
2638
2639 static __always_inline int trace_recursive_lock(void)
2640 {
2641 unsigned int val = this_cpu_read(current_context);
2642 int bit;
2643
2644 if (in_interrupt()) {
2645 if (in_nmi())
2646 bit = 0;
2647 else if (in_irq())
2648 bit = 1;
2649 else
2650 bit = 2;
2651 } else
2652 bit = 3;
2653
2654 if (unlikely(val & (1 << bit)))
2655 return 1;
2656
2657 val |= (1 << bit);
2658 this_cpu_write(current_context, val);
2659
2660 return 0;
2661 }
2662
2663 static __always_inline void trace_recursive_unlock(void)
2664 {
2665 unsigned int val = this_cpu_read(current_context);
2666
2667 val--;
2668 val &= this_cpu_read(current_context);
2669 this_cpu_write(current_context, val);
2670 }
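/*
 * Minimal sketch (not part of this file) of the bit trick used above:
 * "val & (val - 1)" clears the lowest set bit, which is always the bit
 * of the context we are leaving.  The values below are only examples.
 */
#if 0
static void trace_recursive_bit_demo(void)
{
        unsigned int val = 0x5;         /* 0b101: NMI and SoftIRQ bits set */

        val &= val - 1;                 /* 0b100: bit 0 (NMI) cleared */
        WARN_ON(val != 0x4);
}
#endif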
2671
2672 #else
2673
2674 #define trace_recursive_lock() (0)
2675 #define trace_recursive_unlock() do { } while (0)
2676
2677 #endif
2678
2679 /**
2680 * ring_buffer_lock_reserve - reserve a part of the buffer
2681 * @buffer: the ring buffer to reserve from
2682 * @length: the length of the data to reserve (excluding event header)
2683 *
2684 * Returns a reserved event on the ring buffer to copy directly to.
2685 * The user of this interface will need to get the body to write into
2686 * and can use the ring_buffer_event_data() interface.
2687 *
2688 * The length is the length of the data needed, not the event length
2689 * which also includes the event header.
2690 *
2691 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2692 * If NULL is returned, then nothing has been allocated or locked.
2693 */
2694 struct ring_buffer_event *
2695 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2696 {
2697 struct ring_buffer_per_cpu *cpu_buffer;
2698 struct ring_buffer_event *event;
2699 int cpu;
2700
2701 if (ring_buffer_flags != RB_BUFFERS_ON)
2702 return NULL;
2703
2704 /* If we are tracing schedule, we don't want to recurse */
2705 preempt_disable_notrace();
2706
2707 if (atomic_read(&buffer->record_disabled))
2708 goto out_nocheck;
2709
2710 if (trace_recursive_lock())
2711 goto out_nocheck;
2712
2713 cpu = raw_smp_processor_id();
2714
2715 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2716 goto out;
2717
2718 cpu_buffer = buffer->buffers[cpu];
2719
2720 if (atomic_read(&cpu_buffer->record_disabled))
2721 goto out;
2722
2723 if (length > BUF_MAX_DATA_SIZE)
2724 goto out;
2725
2726 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2727 if (!event)
2728 goto out;
2729
2730 return event;
2731
2732 out:
2733 trace_recursive_unlock();
2734
2735 out_nocheck:
2736 preempt_enable_notrace();
2737 return NULL;
2738 }
2739 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
2740
2741 static void
2742 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2743 struct ring_buffer_event *event)
2744 {
2745 u64 delta;
2746
2747 /*
2748 * The first event in the commit queue updates the
2749 * time stamp.
2750 */
2751 if (rb_event_is_commit(cpu_buffer, event)) {
2752 /*
2753 * A commit event that is first on a page
2754 * updates the write timestamp with the page stamp
2755 */
2756 if (!rb_event_index(event))
2757 cpu_buffer->write_stamp =
2758 cpu_buffer->commit_page->page->time_stamp;
2759 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2760 delta = event->array[0];
2761 delta <<= TS_SHIFT;
2762 delta += event->time_delta;
2763 cpu_buffer->write_stamp += delta;
2764 } else
2765 cpu_buffer->write_stamp += event->time_delta;
2766 }
2767 }
2768
2769 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2770 struct ring_buffer_event *event)
2771 {
2772 local_inc(&cpu_buffer->entries);
2773 rb_update_write_stamp(cpu_buffer, event);
2774 rb_end_commit(cpu_buffer);
2775 }
2776
2777 static __always_inline void
2778 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2779 {
2780 if (buffer->irq_work.waiters_pending) {
2781 buffer->irq_work.waiters_pending = false;
2782 /* irq_work_queue() supplies its own memory barriers */
2783 irq_work_queue(&buffer->irq_work.work);
2784 }
2785
2786 if (cpu_buffer->irq_work.waiters_pending) {
2787 cpu_buffer->irq_work.waiters_pending = false;
2788 /* irq_work_queue() supplies its own memory barriers */
2789 irq_work_queue(&cpu_buffer->irq_work.work);
2790 }
2791 }
2792
2793 /**
2794 * ring_buffer_unlock_commit - commit a reserved event
2795 * @buffer: The buffer to commit to
2796 * @event: The event pointer to commit.
2797 *
2798 * This commits the data to the ring buffer, and releases any locks held.
2799 *
2800 * Must be paired with ring_buffer_lock_reserve.
2801 */
2802 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2803 struct ring_buffer_event *event)
2804 {
2805 struct ring_buffer_per_cpu *cpu_buffer;
2806 int cpu = raw_smp_processor_id();
2807
2808 cpu_buffer = buffer->buffers[cpu];
2809
2810 rb_commit(cpu_buffer, event);
2811
2812 rb_wakeups(buffer, cpu_buffer);
2813
2814 trace_recursive_unlock();
2815
2816 preempt_enable_notrace();
2817
2818 return 0;
2819 }
2820 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
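/*
 * Illustrative sketch only (not part of this file): a typical writer
 * pairs ring_buffer_lock_reserve() with ring_buffer_unlock_commit() as
 * described in the comments above.  The my_payload struct, my_buffer
 * pointer and my_write_sample() helper are hypothetical stand-ins.
 */
#if 0
struct my_payload {
        unsigned long   val;
};

static int my_write_sample(struct ring_buffer *my_buffer, unsigned long val)
{
        struct ring_buffer_event *event;
        struct my_payload *entry;

        /* Reserve room for the payload only; the event header is added for us */
        event = ring_buffer_lock_reserve(my_buffer, sizeof(*entry));
        if (!event)
                return -EBUSY;  /* nothing reserved, so no commit is needed */

        entry = ring_buffer_event_data(event);
        entry->val = val;

        /* Must be paired with the reserve above */
        return ring_buffer_unlock_commit(my_buffer, event);
}
#endif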
2821
2822 static inline void rb_event_discard(struct ring_buffer_event *event)
2823 {
2824 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2825 event = skip_time_extend(event);
2826
2827 /* array[0] holds the actual length for the discarded event */
2828 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2829 event->type_len = RINGBUF_TYPE_PADDING;
2830 /* time delta must be non zero */
2831 if (!event->time_delta)
2832 event->time_delta = 1;
2833 }
2834
2835 /*
2836 * Decrement the entries to the page that an event is on.
2837 * The event does not even need to exist, only the pointer
2838 * to the page it is on. This may only be called before the commit
2839 * takes place.
2840 */
2841 static inline void
2842 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2843 struct ring_buffer_event *event)
2844 {
2845 unsigned long addr = (unsigned long)event;
2846 struct buffer_page *bpage = cpu_buffer->commit_page;
2847 struct buffer_page *start;
2848
2849 addr &= PAGE_MASK;
2850
2851 /* Do the likely case first */
2852 if (likely(bpage->page == (void *)addr)) {
2853 local_dec(&bpage->entries);
2854 return;
2855 }
2856
2857 /*
2858 * Because the commit page may be on the reader page, we
2859 * start with the next page and check for the end of the loop there.
2860 */
2861 rb_inc_page(cpu_buffer, &bpage);
2862 start = bpage;
2863 do {
2864 if (bpage->page == (void *)addr) {
2865 local_dec(&bpage->entries);
2866 return;
2867 }
2868 rb_inc_page(cpu_buffer, &bpage);
2869 } while (bpage != start);
2870
2871 /* commit not part of this buffer?? */
2872 RB_WARN_ON(cpu_buffer, 1);
2873 }
2874
2875 /**
2876 * ring_buffer_commit_discard - discard an event that has not been committed
2877 * @buffer: the ring buffer
2878 * @event: non committed event to discard
2879 *
2880 * Sometimes an event that is in the ring buffer needs to be ignored.
2881 * This function lets the user discard an event in the ring buffer
2882 * and then that event will not be read later.
2883 *
2884 * This function only works if it is called before the item has been
2885 * committed. It will try to free the event from the ring buffer
2886 * if another event has not been added behind it.
2887 *
2888 * If another event has been added behind it, it will set the event
2889 * up as discarded, and perform the commit.
2890 *
2891 * If this function is called, do not call ring_buffer_unlock_commit on
2892 * the event.
2893 */
2894 void ring_buffer_discard_commit(struct ring_buffer *buffer,
2895 struct ring_buffer_event *event)
2896 {
2897 struct ring_buffer_per_cpu *cpu_buffer;
2898 int cpu;
2899
2900 /* The event is discarded regardless */
2901 rb_event_discard(event);
2902
2903 cpu = smp_processor_id();
2904 cpu_buffer = buffer->buffers[cpu];
2905
2906 /*
2907 * This must only be called if the event has not been
2908 * committed yet. Thus we can assume that preemption
2909 * is still disabled.
2910 */
2911 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2912
2913 rb_decrement_entry(cpu_buffer, event);
2914 if (rb_try_to_discard(cpu_buffer, event))
2915 goto out;
2916
2917 /*
2918 * The commit is still visible by the reader, so we
2919 * must still update the timestamp.
2920 */
2921 rb_update_write_stamp(cpu_buffer, event);
2922 out:
2923 rb_end_commit(cpu_buffer);
2924
2925 trace_recursive_unlock();
2926
2927 preempt_enable_notrace();
2928
2929 }
2930 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
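/*
 * Illustrative sketch only: a writer that decides, after reserving, that
 * the event should not be recorded calls ring_buffer_discard_commit()
 * instead of ring_buffer_unlock_commit().  The filter condition and the
 * my_maybe_write() helper below are hypothetical.
 */
#if 0
static void my_maybe_write(struct ring_buffer *my_buffer, unsigned long val)
{
        struct ring_buffer_event *event;
        unsigned long *entry;

        event = ring_buffer_lock_reserve(my_buffer, sizeof(*entry));
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        *entry = val;

        if (!val)       /* hypothetical "filter this event out" condition */
                ring_buffer_discard_commit(my_buffer, event);
        else
                ring_buffer_unlock_commit(my_buffer, event);
}
#endif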
2931
2932 /**
2933 * ring_buffer_write - write data to the buffer without reserving
2934 * @buffer: The ring buffer to write to.
2935 * @length: The length of the data being written (excluding the event header)
2936 * @data: The data to write to the buffer.
2937 *
2938 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2939 * one function. If you already have the data to write to the buffer, it
2940 * may be easier to simply call this function.
2941 *
2942 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2943 * and not the length of the event which would hold the header.
2944 */
2945 int ring_buffer_write(struct ring_buffer *buffer,
2946 unsigned long length,
2947 void *data)
2948 {
2949 struct ring_buffer_per_cpu *cpu_buffer;
2950 struct ring_buffer_event *event;
2951 void *body;
2952 int ret = -EBUSY;
2953 int cpu;
2954
2955 if (ring_buffer_flags != RB_BUFFERS_ON)
2956 return -EBUSY;
2957
2958 preempt_disable_notrace();
2959
2960 if (atomic_read(&buffer->record_disabled))
2961 goto out;
2962
2963 cpu = raw_smp_processor_id();
2964
2965 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2966 goto out;
2967
2968 cpu_buffer = buffer->buffers[cpu];
2969
2970 if (atomic_read(&cpu_buffer->record_disabled))
2971 goto out;
2972
2973 if (length > BUF_MAX_DATA_SIZE)
2974 goto out;
2975
2976 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2977 if (!event)
2978 goto out;
2979
2980 body = rb_event_data(event);
2981
2982 memcpy(body, data, length);
2983
2984 rb_commit(cpu_buffer, event);
2985
2986 rb_wakeups(buffer, cpu_buffer);
2987
2988 ret = 0;
2989 out:
2990 preempt_enable_notrace();
2991
2992 return ret;
2993 }
2994 EXPORT_SYMBOL_GPL(ring_buffer_write);
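/*
 * Illustrative sketch only: when the data already lives in a local
 * variable or buffer, ring_buffer_write() replaces the explicit
 * reserve/commit pair shown earlier.  The sample value is hypothetical.
 */
#if 0
static int my_write_blob(struct ring_buffer *my_buffer)
{
        unsigned long sample = 42;      /* hypothetical payload */

        return ring_buffer_write(my_buffer, sizeof(sample), &sample);
}
#endif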
2995
2996 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2997 {
2998 struct buffer_page *reader = cpu_buffer->reader_page;
2999 struct buffer_page *head = rb_set_head_page(cpu_buffer);
3000 struct buffer_page *commit = cpu_buffer->commit_page;
3001
3002 /* In case of error, head will be NULL */
3003 if (unlikely(!head))
3004 return 1;
3005
3006 return reader->read == rb_page_commit(reader) &&
3007 (commit == reader ||
3008 (commit == head &&
3009 head->read == rb_page_commit(commit)));
3010 }
3011
3012 /**
3013 * ring_buffer_record_disable - stop all writes into the buffer
3014 * @buffer: The ring buffer to stop writes to.
3015 *
3016 * This prevents all writes to the buffer. Any attempt to write
3017 * to the buffer after this will fail and return NULL.
3018 *
3019 * The caller should call synchronize_sched() after this.
3020 */
3021 void ring_buffer_record_disable(struct ring_buffer *buffer)
3022 {
3023 atomic_inc(&buffer->record_disabled);
3024 }
3025 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3026
3027 /**
3028 * ring_buffer_record_enable - enable writes to the buffer
3029 * @buffer: The ring buffer to enable writes
3030 *
3031 * Note, multiple disables will need the same number of enables
3032 * to truly enable the writing (much like preempt_disable).
3033 */
3034 void ring_buffer_record_enable(struct ring_buffer *buffer)
3035 {
3036 atomic_dec(&buffer->record_disabled);
3037 }
3038 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
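/*
 * Illustrative sketch only: the disable/enable pair nests like
 * preempt_disable(), and the comment above asks the caller to use
 * synchronize_sched() before touching the buffer from outside the
 * writer path.  my_quiesce_and_inspect() is a hypothetical caller.
 */
#if 0
static void my_quiesce_and_inspect(struct ring_buffer *my_buffer)
{
        ring_buffer_record_disable(my_buffer);
        synchronize_sched();    /* wait for in-flight writers to finish */

        /* ... safely read or account the buffer here ... */

        ring_buffer_record_enable(my_buffer);
}
#endif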
3039
3040 /**
3041 * ring_buffer_record_off - stop all writes into the buffer
3042 * @buffer: The ring buffer to stop writes to.
3043 *
3044 * This prevents all writes to the buffer. Any attempt to write
3045 * to the buffer after this will fail and return NULL.
3046 *
3047 * This is different from ring_buffer_record_disable() as
3048 * it works like an on/off switch, whereas the disable() version
3049 * must be paired with an enable().
3050 */
3051 void ring_buffer_record_off(struct ring_buffer *buffer)
3052 {
3053 unsigned int rd;
3054 unsigned int new_rd;
3055
3056 do {
3057 rd = atomic_read(&buffer->record_disabled);
3058 new_rd = rd | RB_BUFFER_OFF;
3059 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3060 }
3061 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3062
3063 /**
3064 * ring_buffer_record_on - restart writes into the buffer
3065 * @buffer: The ring buffer to start writes to.
3066 *
3067 * This enables all writes to the buffer that was disabled by
3068 * ring_buffer_record_off().
3069 *
3070 * This is different from ring_buffer_record_enable() as
3071 * it works like an on/off switch, whereas the enable() version
3072 * must be paired with a disable().
3073 */
3074 void ring_buffer_record_on(struct ring_buffer *buffer)
3075 {
3076 unsigned int rd;
3077 unsigned int new_rd;
3078
3079 do {
3080 rd = atomic_read(&buffer->record_disabled);
3081 new_rd = rd & ~RB_BUFFER_OFF;
3082 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3083 }
3084 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
3085
3086 /**
3087 * ring_buffer_record_is_on - return true if the ring buffer can write
3088 * @buffer: The ring buffer to see if write is enabled
3089 *
3090 * Returns true if the ring buffer is in a state that it accepts writes.
3091 */
3092 int ring_buffer_record_is_on(struct ring_buffer *buffer)
3093 {
3094 return !atomic_read(&buffer->record_disabled);
3095 }
3096
3097 /**
3098 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3099 * @buffer: The ring buffer to stop writes to.
3100 * @cpu: The CPU buffer to stop
3101 *
3102 * This prevents all writes to the buffer. Any attempt to write
3103 * to the buffer after this will fail and return NULL.
3104 *
3105 * The caller should call synchronize_sched() after this.
3106 */
3107 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3108 {
3109 struct ring_buffer_per_cpu *cpu_buffer;
3110
3111 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3112 return;
3113
3114 cpu_buffer = buffer->buffers[cpu];
3115 atomic_inc(&cpu_buffer->record_disabled);
3116 }
3117 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3118
3119 /**
3120 * ring_buffer_record_enable_cpu - enable writes to the buffer
3121 * @buffer: The ring buffer to enable writes
3122 * @cpu: The CPU to enable.
3123 *
3124 * Note, multiple disables will need the same number of enables
3125 * to truly enable the writing (much like preempt_disable).
3126 */
3127 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3128 {
3129 struct ring_buffer_per_cpu *cpu_buffer;
3130
3131 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3132 return;
3133
3134 cpu_buffer = buffer->buffers[cpu];
3135 atomic_dec(&cpu_buffer->record_disabled);
3136 }
3137 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3138
3139 /*
3140 * The total entries in the ring buffer is the running counter
3141 * of entries entered into the ring buffer, minus the sum of
3142 * the entries read from the ring buffer and the number of
3143 * entries that were overwritten.
3144 */
3145 static inline unsigned long
3146 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3147 {
3148 return local_read(&cpu_buffer->entries) -
3149 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3150 }
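/*
 * Worked example (illustrative numbers only): if 1000 events have been
 * written, 200 were overwritten when the writer wrapped, and 300 have
 * already been read, rb_num_of_entries() reports 1000 - (200 + 300) =
 * 500 events still waiting in this per cpu buffer.
 */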
3151
3152 /**
3153 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3154 * @buffer: The ring buffer
3155 * @cpu: The per CPU buffer to read from.
3156 */
3157 u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3158 {
3159 unsigned long flags;
3160 struct ring_buffer_per_cpu *cpu_buffer;
3161 struct buffer_page *bpage;
3162 u64 ret = 0;
3163
3164 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3165 return 0;
3166
3167 cpu_buffer = buffer->buffers[cpu];
3168 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3169 /*
3170 * If the tail is on the reader_page, the oldest time stamp is on the
3171 * reader page.
3172 */
3173 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3174 bpage = cpu_buffer->reader_page;
3175 else
3176 bpage = rb_set_head_page(cpu_buffer);
3177 if (bpage)
3178 ret = bpage->page->time_stamp;
3179 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3180
3181 return ret;
3182 }
3183 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3184
3185 /**
3186 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3187 * @buffer: The ring buffer
3188 * @cpu: The per CPU buffer to read from.
3189 */
3190 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3191 {
3192 struct ring_buffer_per_cpu *cpu_buffer;
3193 unsigned long ret;
3194
3195 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3196 return 0;
3197
3198 cpu_buffer = buffer->buffers[cpu];
3199 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3200
3201 return ret;
3202 }
3203 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3204
3205 /**
3206 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3207 * @buffer: The ring buffer
3208 * @cpu: The per CPU buffer to get the entries from.
3209 */
3210 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3211 {
3212 struct ring_buffer_per_cpu *cpu_buffer;
3213
3214 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3215 return 0;
3216
3217 cpu_buffer = buffer->buffers[cpu];
3218
3219 return rb_num_of_entries(cpu_buffer);
3220 }
3221 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3222
3223 /**
3224 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3225 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3226 * @buffer: The ring buffer
3227 * @cpu: The per CPU buffer to get the number of overruns from
3228 */
3229 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3230 {
3231 struct ring_buffer_per_cpu *cpu_buffer;
3232 unsigned long ret;
3233
3234 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3235 return 0;
3236
3237 cpu_buffer = buffer->buffers[cpu];
3238 ret = local_read(&cpu_buffer->overrun);
3239
3240 return ret;
3241 }
3242 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3243
3244 /**
3245 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3246 * commits failing due to the buffer wrapping around while there are uncommitted
3247 * events, such as during an interrupt storm.
3248 * @buffer: The ring buffer
3249 * @cpu: The per CPU buffer to get the number of overruns from
3250 */
3251 unsigned long
3252 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3253 {
3254 struct ring_buffer_per_cpu *cpu_buffer;
3255 unsigned long ret;
3256
3257 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3258 return 0;
3259
3260 cpu_buffer = buffer->buffers[cpu];
3261 ret = local_read(&cpu_buffer->commit_overrun);
3262
3263 return ret;
3264 }
3265 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3266
3267 /**
3268 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3269 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3270 * @buffer: The ring buffer
3271 * @cpu: The per CPU buffer to get the number of overruns from
3272 */
3273 unsigned long
3274 ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3275 {
3276 struct ring_buffer_per_cpu *cpu_buffer;
3277 unsigned long ret;
3278
3279 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3280 return 0;
3281
3282 cpu_buffer = buffer->buffers[cpu];
3283 ret = local_read(&cpu_buffer->dropped_events);
3284
3285 return ret;
3286 }
3287 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3288
3289 /**
3290 * ring_buffer_read_events_cpu - get the number of events successfully read
3291 * @buffer: The ring buffer
3292 * @cpu: The per CPU buffer to get the number of events read
3293 */
3294 unsigned long
3295 ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3296 {
3297 struct ring_buffer_per_cpu *cpu_buffer;
3298
3299 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3300 return 0;
3301
3302 cpu_buffer = buffer->buffers[cpu];
3303 return cpu_buffer->read;
3304 }
3305 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3306
3307 /**
3308 * ring_buffer_entries - get the number of entries in a buffer
3309 * @buffer: The ring buffer
3310 *
3311 * Returns the total number of entries in the ring buffer
3312 * (all CPU entries)
3313 */
3314 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3315 {
3316 struct ring_buffer_per_cpu *cpu_buffer;
3317 unsigned long entries = 0;
3318 int cpu;
3319
3320 /* if you care about this being correct, lock the buffer */
3321 for_each_buffer_cpu(buffer, cpu) {
3322 cpu_buffer = buffer->buffers[cpu];
3323 entries += rb_num_of_entries(cpu_buffer);
3324 }
3325
3326 return entries;
3327 }
3328 EXPORT_SYMBOL_GPL(ring_buffer_entries);
3329
3330 /**
3331 * ring_buffer_overruns - get the number of overruns in buffer
3332 * @buffer: The ring buffer
3333 *
3334 * Returns the total number of overruns in the ring buffer
3335 * (all CPU entries)
3336 */
3337 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3338 {
3339 struct ring_buffer_per_cpu *cpu_buffer;
3340 unsigned long overruns = 0;
3341 int cpu;
3342
3343 /* if you care about this being correct, lock the buffer */
3344 for_each_buffer_cpu(buffer, cpu) {
3345 cpu_buffer = buffer->buffers[cpu];
3346 overruns += local_read(&cpu_buffer->overrun);
3347 }
3348
3349 return overruns;
3350 }
3351 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
3352
3353 static void rb_iter_reset(struct ring_buffer_iter *iter)
3354 {
3355 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3356
3357 /* Iterator usage is expected to have record disabled */
3358 if (list_empty(&cpu_buffer->reader_page->list)) {
3359 iter->head_page = rb_set_head_page(cpu_buffer);
3360 if (unlikely(!iter->head_page))
3361 return;
3362 iter->head = iter->head_page->read;
3363 } else {
3364 iter->head_page = cpu_buffer->reader_page;
3365 iter->head = cpu_buffer->reader_page->read;
3366 }
3367 if (iter->head)
3368 iter->read_stamp = cpu_buffer->read_stamp;
3369 else
3370 iter->read_stamp = iter->head_page->page->time_stamp;
3371 iter->cache_reader_page = cpu_buffer->reader_page;
3372 iter->cache_read = cpu_buffer->read;
3373 }
3374
3375 /**
3376 * ring_buffer_iter_reset - reset an iterator
3377 * @iter: The iterator to reset
3378 *
3379 * Resets the iterator, so that it will start from the beginning
3380 * again.
3381 */
3382 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3383 {
3384 struct ring_buffer_per_cpu *cpu_buffer;
3385 unsigned long flags;
3386
3387 if (!iter)
3388 return;
3389
3390 cpu_buffer = iter->cpu_buffer;
3391
3392 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3393 rb_iter_reset(iter);
3394 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3395 }
3396 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3397
3398 /**
3399 * ring_buffer_iter_empty - check if an iterator has no more to read
3400 * @iter: The iterator to check
3401 */
3402 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3403 {
3404 struct ring_buffer_per_cpu *cpu_buffer;
3405
3406 cpu_buffer = iter->cpu_buffer;
3407
3408 return iter->head_page == cpu_buffer->commit_page &&
3409 iter->head == rb_commit_index(cpu_buffer);
3410 }
3411 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3412
3413 static void
3414 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3415 struct ring_buffer_event *event)
3416 {
3417 u64 delta;
3418
3419 switch (event->type_len) {
3420 case RINGBUF_TYPE_PADDING:
3421 return;
3422
3423 case RINGBUF_TYPE_TIME_EXTEND:
3424 delta = event->array[0];
3425 delta <<= TS_SHIFT;
3426 delta += event->time_delta;
3427 cpu_buffer->read_stamp += delta;
3428 return;
3429
3430 case RINGBUF_TYPE_TIME_STAMP:
3431 /* FIXME: not implemented */
3432 return;
3433
3434 case RINGBUF_TYPE_DATA:
3435 cpu_buffer->read_stamp += event->time_delta;
3436 return;
3437
3438 default:
3439 BUG();
3440 }
3441 return;
3442 }
3443
3444 static void
3445 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3446 struct ring_buffer_event *event)
3447 {
3448 u64 delta;
3449
3450 switch (event->type_len) {
3451 case RINGBUF_TYPE_PADDING:
3452 return;
3453
3454 case RINGBUF_TYPE_TIME_EXTEND:
3455 delta = event->array[0];
3456 delta <<= TS_SHIFT;
3457 delta += event->time_delta;
3458 iter->read_stamp += delta;
3459 return;
3460
3461 case RINGBUF_TYPE_TIME_STAMP:
3462 /* FIXME: not implemented */
3463 return;
3464
3465 case RINGBUF_TYPE_DATA:
3466 iter->read_stamp += event->time_delta;
3467 return;
3468
3469 default:
3470 BUG();
3471 }
3472 return;
3473 }
3474
3475 static struct buffer_page *
3476 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3477 {
3478 struct buffer_page *reader = NULL;
3479 unsigned long overwrite;
3480 unsigned long flags;
3481 int nr_loops = 0;
3482 int ret;
3483
3484 local_irq_save(flags);
3485 arch_spin_lock(&cpu_buffer->lock);
3486
3487 again:
3488 /*
3489 * This should normally only loop twice. But because the
3490 * start of the reader inserts an empty page, it causes
3491 * a case where we will loop three times. There should be no
3492 * reason to loop four times (that I know of).
3493 */
3494 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3495 reader = NULL;
3496 goto out;
3497 }
3498
3499 reader = cpu_buffer->reader_page;
3500
3501 /* If there's more to read, return this page */
3502 if (cpu_buffer->reader_page->read < rb_page_size(reader))
3503 goto out;
3504
3505 /* Never should we have an index greater than the size */
3506 if (RB_WARN_ON(cpu_buffer,
3507 cpu_buffer->reader_page->read > rb_page_size(reader)))
3508 goto out;
3509
3510 /* check if we caught up to the tail */
3511 reader = NULL;
3512 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3513 goto out;
3514
3515 /* Don't bother swapping if the ring buffer is empty */
3516 if (rb_num_of_entries(cpu_buffer) == 0)
3517 goto out;
3518
3519 /*
3520 * Reset the reader page to size zero.
3521 */
3522 local_set(&cpu_buffer->reader_page->write, 0);
3523 local_set(&cpu_buffer->reader_page->entries, 0);
3524 local_set(&cpu_buffer->reader_page->page->commit, 0);
3525 cpu_buffer->reader_page->real_end = 0;
3526
3527 spin:
3528 /*
3529 * Splice the empty reader page into the list around the head.
3530 */
3531 reader = rb_set_head_page(cpu_buffer);
3532 if (!reader)
3533 goto out;
3534 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3535 cpu_buffer->reader_page->list.prev = reader->list.prev;
3536
3537 /*
3538 * cpu_buffer->pages just needs to point to the buffer, it
3539 * has no specific buffer page to point to. Let's move it out
3540 * of our way so we don't accidentally swap it.
3541 */
3542 cpu_buffer->pages = reader->list.prev;
3543
3544 /* The reader page will be pointing to the new head */
3545 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3546
3547 /*
3548 * We want to make sure we read the overruns after we set up our
3549 * pointers to the next object. The writer side does a
3550 * cmpxchg to cross pages which acts as the mb on the writer
3551 * side. Note, the reader will constantly fail the swap
3552 * while the writer is updating the pointers, so this
3553 * guarantees that the overwrite recorded here is the one we
3554 * want to compare with the last_overrun.
3555 */
3556 smp_mb();
3557 overwrite = local_read(&(cpu_buffer->overrun));
3558
3559 /*
3560 * Here's the tricky part.
3561 *
3562 * We need to move the pointer past the header page.
3563 * But we can only do that if a writer is not currently
3564 * moving it. The page before the header page has the
3565 * flag bit '1' set if it is pointing to the page we want.
3566 * But if the writer is in the process of moving it,
3567 * then it will be '2' or already moved '0'.
3568 */
3569
3570 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3571
3572 /*
3573 * If we did not convert it, then we must try again.
3574 */
3575 if (!ret)
3576 goto spin;
3577
3578 /*
3579 * Yeah! We succeeded in replacing the page.
3580 *
3581 * Now make the new head point back to the reader page.
3582 */
3583 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3584 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3585
3586 /* Finally update the reader page to the new head */
3587 cpu_buffer->reader_page = reader;
3588 rb_reset_reader_page(cpu_buffer);
3589
3590 if (overwrite != cpu_buffer->last_overrun) {
3591 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3592 cpu_buffer->last_overrun = overwrite;
3593 }
3594
3595 goto again;
3596
3597 out:
3598 arch_spin_unlock(&cpu_buffer->lock);
3599 local_irq_restore(flags);
3600
3601 return reader;
3602 }
3603
3604 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3605 {
3606 struct ring_buffer_event *event;
3607 struct buffer_page *reader;
3608 unsigned length;
3609
3610 reader = rb_get_reader_page(cpu_buffer);
3611
3612 /* This function should not be called when buffer is empty */
3613 if (RB_WARN_ON(cpu_buffer, !reader))
3614 return;
3615
3616 event = rb_reader_event(cpu_buffer);
3617
3618 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3619 cpu_buffer->read++;
3620
3621 rb_update_read_stamp(cpu_buffer, event);
3622
3623 length = rb_event_length(event);
3624 cpu_buffer->reader_page->read += length;
3625 }
3626
3627 static void rb_advance_iter(struct ring_buffer_iter *iter)
3628 {
3629 struct ring_buffer_per_cpu *cpu_buffer;
3630 struct ring_buffer_event *event;
3631 unsigned length;
3632
3633 cpu_buffer = iter->cpu_buffer;
3634
3635 /*
3636 * Check if we are at the end of the buffer.
3637 */
3638 if (iter->head >= rb_page_size(iter->head_page)) {
3639 /* discarded commits can make the page empty */
3640 if (iter->head_page == cpu_buffer->commit_page)
3641 return;
3642 rb_inc_iter(iter);
3643 return;
3644 }
3645
3646 event = rb_iter_head_event(iter);
3647
3648 length = rb_event_length(event);
3649
3650 /*
3651 * This should not be called to advance the header if we are
3652 * at the tail of the buffer.
3653 */
3654 if (RB_WARN_ON(cpu_buffer,
3655 (iter->head_page == cpu_buffer->commit_page) &&
3656 (iter->head + length > rb_commit_index(cpu_buffer))))
3657 return;
3658
3659 rb_update_iter_read_stamp(iter, event);
3660
3661 iter->head += length;
3662
3663 /* check for end of page padding */
3664 if ((iter->head >= rb_page_size(iter->head_page)) &&
3665 (iter->head_page != cpu_buffer->commit_page))
3666 rb_inc_iter(iter);
3667 }
3668
3669 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3670 {
3671 return cpu_buffer->lost_events;
3672 }
3673
3674 static struct ring_buffer_event *
3675 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3676 unsigned long *lost_events)
3677 {
3678 struct ring_buffer_event *event;
3679 struct buffer_page *reader;
3680 int nr_loops = 0;
3681
3682 again:
3683 /*
3684 * We repeat when a time extend is encountered.
3685 * Since the time extend is always attached to a data event,
3686 * we should never loop more than once.
3687 * (We never hit the following condition more than twice).
3688 */
3689 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3690 return NULL;
3691
3692 reader = rb_get_reader_page(cpu_buffer);
3693 if (!reader)
3694 return NULL;
3695
3696 event = rb_reader_event(cpu_buffer);
3697
3698 switch (event->type_len) {
3699 case RINGBUF_TYPE_PADDING:
3700 if (rb_null_event(event))
3701 RB_WARN_ON(cpu_buffer, 1);
3702 /*
3703 * Because the writer could be discarding every
3704 * event it creates (which would probably be bad),
3705 * if we were to go back to "again" then we may never
3706 * catch up, and will trigger the warn on, or lock
3707 * the box. Return the padding, and we will release
3708 * the current locks, and try again.
3709 */
3710 return event;
3711
3712 case RINGBUF_TYPE_TIME_EXTEND:
3713 /* Internal data, OK to advance */
3714 rb_advance_reader(cpu_buffer);
3715 goto again;
3716
3717 case RINGBUF_TYPE_TIME_STAMP:
3718 /* FIXME: not implemented */
3719 rb_advance_reader(cpu_buffer);
3720 goto again;
3721
3722 case RINGBUF_TYPE_DATA:
3723 if (ts) {
3724 *ts = cpu_buffer->read_stamp + event->time_delta;
3725 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3726 cpu_buffer->cpu, ts);
3727 }
3728 if (lost_events)
3729 *lost_events = rb_lost_events(cpu_buffer);
3730 return event;
3731
3732 default:
3733 BUG();
3734 }
3735
3736 return NULL;
3737 }
3738 EXPORT_SYMBOL_GPL(ring_buffer_peek);
3739
3740 static struct ring_buffer_event *
3741 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3742 {
3743 struct ring_buffer *buffer;
3744 struct ring_buffer_per_cpu *cpu_buffer;
3745 struct ring_buffer_event *event;
3746 int nr_loops = 0;
3747
3748 cpu_buffer = iter->cpu_buffer;
3749 buffer = cpu_buffer->buffer;
3750
3751 /*
3752 * Check if someone performed a consuming read to
3753 * the buffer. A consuming read invalidates the iterator
3754 * and we need to reset the iterator in this case.
3755 */
3756 if (unlikely(iter->cache_read != cpu_buffer->read ||
3757 iter->cache_reader_page != cpu_buffer->reader_page))
3758 rb_iter_reset(iter);
3759
3760 again:
3761 if (ring_buffer_iter_empty(iter))
3762 return NULL;
3763
3764 /*
3765 * We repeat when a time extend is encountered.
3766 * Since the time extend is always attached to a data event,
3767 * we should never loop more than once.
3768 * (We never hit the following condition more than twice).
3769 */
3770 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3771 return NULL;
3772
3773 if (rb_per_cpu_empty(cpu_buffer))
3774 return NULL;
3775
3776 if (iter->head >= local_read(&iter->head_page->page->commit)) {
3777 rb_inc_iter(iter);
3778 goto again;
3779 }
3780
3781 event = rb_iter_head_event(iter);
3782
3783 switch (event->type_len) {
3784 case RINGBUF_TYPE_PADDING:
3785 if (rb_null_event(event)) {
3786 rb_inc_iter(iter);
3787 goto again;
3788 }
3789 rb_advance_iter(iter);
3790 return event;
3791
3792 case RINGBUF_TYPE_TIME_EXTEND:
3793 /* Internal data, OK to advance */
3794 rb_advance_iter(iter);
3795 goto again;
3796
3797 case RINGBUF_TYPE_TIME_STAMP:
3798 /* FIXME: not implemented */
3799 rb_advance_iter(iter);
3800 goto again;
3801
3802 case RINGBUF_TYPE_DATA:
3803 if (ts) {
3804 *ts = iter->read_stamp + event->time_delta;
3805 ring_buffer_normalize_time_stamp(buffer,
3806 cpu_buffer->cpu, ts);
3807 }
3808 return event;
3809
3810 default:
3811 BUG();
3812 }
3813
3814 return NULL;
3815 }
3816 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3817
3818 static inline int rb_ok_to_lock(void)
3819 {
3820 /*
3821 * If an NMI die dumps out the content of the ring buffer,
3822 * do not grab locks. We also permanently disable the ring
3823 * buffer too. A one time deal is all you get from reading
3824 * the ring buffer from an NMI.
3825 */
3826 if (likely(!in_nmi()))
3827 return 1;
3828
3829 tracing_off_permanent();
3830 return 0;
3831 }
3832
3833 /**
3834 * ring_buffer_peek - peek at the next event to be read
3835 * @buffer: The ring buffer to read
3836 * @cpu: The cpu to peek at
3837 * @ts: The timestamp counter of this event.
3838 * @lost_events: a variable to store if events were lost (may be NULL)
3839 *
3840 * This will return the event that will be read next, but does
3841 * not consume the data.
3842 */
3843 struct ring_buffer_event *
3844 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3845 unsigned long *lost_events)
3846 {
3847 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3848 struct ring_buffer_event *event;
3849 unsigned long flags;
3850 int dolock;
3851
3852 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3853 return NULL;
3854
3855 dolock = rb_ok_to_lock();
3856 again:
3857 local_irq_save(flags);
3858 if (dolock)
3859 raw_spin_lock(&cpu_buffer->reader_lock);
3860 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3861 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3862 rb_advance_reader(cpu_buffer);
3863 if (dolock)
3864 raw_spin_unlock(&cpu_buffer->reader_lock);
3865 local_irq_restore(flags);
3866
3867 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3868 goto again;
3869
3870 return event;
3871 }
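
/*
 * Illustrative sketch (not part of the original file): peeking at the
 * next event on one CPU without consuming it. "buffer" and "cpu" are
 * assumed to be set up by the caller; error handling is minimal and
 * "inspect" is a hypothetical helper.
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost_events;
 *	u64 ts;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, &lost_events);
 *	if (event) {
 *		void *data = ring_buffer_event_data(event);
 *		unsigned int len = ring_buffer_event_length(event);
 *
 *		inspect(data, len, ts, lost_events);
 *	}
 *
 * The same event stays at the head of the per-CPU buffer until it is
 * consumed with ring_buffer_consume().
 */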
3872
3873 /**
3874 * ring_buffer_iter_peek - peek at the next event to be read
3875 * @iter: The ring buffer iterator
3876 * @ts: The timestamp counter of this event.
3877 *
3878 * This will return the event that will be read next, but does
3879 * not increment the iterator.
3880 */
3881 struct ring_buffer_event *
3882 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3883 {
3884 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3885 struct ring_buffer_event *event;
3886 unsigned long flags;
3887
3888 again:
3889 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3890 event = rb_iter_peek(iter, ts);
3891 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3892
3893 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3894 goto again;
3895
3896 return event;
3897 }
3898
3899 /**
3900 * ring_buffer_consume - return an event and consume it
3901 * @buffer: The ring buffer to get the next event from
3902 * @cpu: the cpu to read the buffer from
3903 * @ts: a variable to store the timestamp (may be NULL)
3904 * @lost_events: a variable to store if events were lost (may be NULL)
3905 *
3906 * Returns the next event in the ring buffer, and that event is consumed.
3907 * Meaning that sequential reads will keep returning a different event,
3908 * and eventually empty the ring buffer if the producer is slower.
3909 */
3910 struct ring_buffer_event *
3911 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3912 unsigned long *lost_events)
3913 {
3914 struct ring_buffer_per_cpu *cpu_buffer;
3915 struct ring_buffer_event *event = NULL;
3916 unsigned long flags;
3917 int dolock;
3918
3919 dolock = rb_ok_to_lock();
3920
3921 again:
3922 /* might be called in atomic */
3923 preempt_disable();
3924
3925 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3926 goto out;
3927
3928 cpu_buffer = buffer->buffers[cpu];
3929 local_irq_save(flags);
3930 if (dolock)
3931 raw_spin_lock(&cpu_buffer->reader_lock);
3932
3933 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3934 if (event) {
3935 cpu_buffer->lost_events = 0;
3936 rb_advance_reader(cpu_buffer);
3937 }
3938
3939 if (dolock)
3940 raw_spin_unlock(&cpu_buffer->reader_lock);
3941 local_irq_restore(flags);
3942
3943 out:
3944 preempt_enable();
3945
3946 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3947 goto again;
3948
3949 return event;
3950 }
3951 EXPORT_SYMBOL_GPL(ring_buffer_consume);
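
/*
 * Illustrative sketch (not part of the original file): a minimal
 * consuming-read loop that drains every per-CPU buffer. "buffer" is
 * assumed to be a valid struct ring_buffer; "process_record" and
 * struct my_record are hypothetical, and how the caller waits for new
 * data is left out.
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *	int cpu;
 *
 *	for_each_online_cpu(cpu) {
 *		while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *			struct my_record *rec = ring_buffer_event_data(event);
 *
 *			process_record(rec, ts, lost);
 *		}
 *	}
 */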
3952
3953 /**
3954 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3955 * @buffer: The ring buffer to read from
3956 * @cpu: The cpu buffer to iterate over
3957 *
3958 * This performs the initial preparations necessary to iterate
3959 * through the buffer. Memory is allocated, buffer recording
3960 * is disabled, and the iterator pointer is returned to the caller.
3961 *
3962 * Disabling buffer recording prevents the reading from being
3963 * corrupted. This is not a consuming read, so a producer is not
3964 * expected.
3965 *
3966 * After a sequence of ring_buffer_read_prepare calls, the user is
3967 * expected to make at least one call to ring_buffer_read_prepare_sync.
3968 * Afterwards, ring_buffer_read_start is invoked to get things going
3969 * for real.
3970 *
3971 * This overall must be paired with ring_buffer_read_finish.
3972 */
3973 struct ring_buffer_iter *
3974 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3975 {
3976 struct ring_buffer_per_cpu *cpu_buffer;
3977 struct ring_buffer_iter *iter;
3978
3979 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3980 return NULL;
3981
3982 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3983 if (!iter)
3984 return NULL;
3985
3986 cpu_buffer = buffer->buffers[cpu];
3987
3988 iter->cpu_buffer = cpu_buffer;
3989
3990 atomic_inc(&buffer->resize_disabled);
3991 atomic_inc(&cpu_buffer->record_disabled);
3992
3993 return iter;
3994 }
3995 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
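
/*
 * Illustrative sketch (not part of the original file): preparing an
 * iterator for every online CPU and then issuing the single
 * synchronization described above. "iters" and "buffer" are
 * hypothetical caller state; ring_buffer_read_start() and
 * ring_buffer_read_finish() (below) complete the sequence.
 *
 *	struct ring_buffer_iter *iters[NR_CPUS];
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		iters[cpu] = ring_buffer_read_prepare(buffer, cpu);
 *
 *	ring_buffer_read_prepare_sync();
 *
 * One ring_buffer_read_prepare_sync() call covers all of the prepare
 * calls made before it.
 */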
3996
3997 /**
3998 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
3999 *
4000 * All previously invoked ring_buffer_read_prepare calls to prepare
4001 * iterators will be synchronized. Afterwards, ring_buffer_read_start
4002 * calls on those iterators are allowed.
4003 */
4004 void
4005 ring_buffer_read_prepare_sync(void)
4006 {
4007 synchronize_sched();
4008 }
4009 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4010
4011 /**
4012 * ring_buffer_read_start - start a non consuming read of the buffer
4013 * @iter: The iterator returned by ring_buffer_read_prepare
4014 *
4015 * This finalizes the startup of an iteration through the buffer.
4016 * The iterator comes from a call to ring_buffer_read_prepare and
4017 * an intervening ring_buffer_read_prepare_sync must have been
4018 * performed.
4019 *
4020 * Must be paired with ring_buffer_read_finish.
4021 */
4022 void
4023 ring_buffer_read_start(struct ring_buffer_iter *iter)
4024 {
4025 struct ring_buffer_per_cpu *cpu_buffer;
4026 unsigned long flags;
4027
4028 if (!iter)
4029 return;
4030
4031 cpu_buffer = iter->cpu_buffer;
4032
4033 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4034 arch_spin_lock(&cpu_buffer->lock);
4035 rb_iter_reset(iter);
4036 arch_spin_unlock(&cpu_buffer->lock);
4037 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4038 }
4039 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4040
4041 /**
4042 * ring_buffer_read_finish - finish reading the iterator of the buffer
4043 * @iter: The iterator retrieved by ring_buffer_read_prepare
4044 *
4045 * This re-enables the recording to the buffer, and frees the
4046 * iterator.
4047 */
4048 void
4049 ring_buffer_read_finish(struct ring_buffer_iter *iter)
4050 {
4051 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4052 unsigned long flags;
4053
4054 /*
4055 * Ring buffer is disabled from recording, here's a good place
4056 * to check the integrity of the ring buffer.
4057 * Must prevent readers from trying to read, as the check
4058 * clears the HEAD page and readers require it.
4059 */
4060 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4061 rb_check_pages(cpu_buffer);
4062 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4063
4064 atomic_dec(&cpu_buffer->record_disabled);
4065 atomic_dec(&cpu_buffer->buffer->resize_disabled);
4066 kfree(iter);
4067 }
4068 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
4069
4070 /**
4071 * ring_buffer_read - read the next item in the ring buffer by the iterator
4072 * @iter: The ring buffer iterator
4073 * @ts: The time stamp of the event read.
4074 *
4075 * This reads the next event in the ring buffer and increments the iterator.
4076 */
4077 struct ring_buffer_event *
4078 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4079 {
4080 struct ring_buffer_event *event;
4081 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4082 unsigned long flags;
4083
4084 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4085 again:
4086 event = rb_iter_peek(iter, ts);
4087 if (!event)
4088 goto out;
4089
4090 if (event->type_len == RINGBUF_TYPE_PADDING)
4091 goto again;
4092
4093 rb_advance_iter(iter);
4094 out:
4095 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4096
4097 return event;
4098 }
4099 EXPORT_SYMBOL_GPL(ring_buffer_read);
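
/*
 * Illustrative sketch (not part of the original file): finishing the
 * non-consuming read sequence for one prepared iterator. "iter" is the
 * value returned by ring_buffer_read_prepare() for this CPU, after
 * ring_buffer_read_prepare_sync() has run; "handle_event" is a
 * hypothetical callback.
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	ring_buffer_read_start(iter);
 *
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		handle_event(ring_buffer_event_data(event), ts);
 *
 *	ring_buffer_read_finish(iter);
 *
 * Because recording is disabled between prepare and finish, the
 * iteration sees a stable snapshot of the buffer contents.
 */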
4100
4101 /**
4102 * ring_buffer_size - return the size of the ring buffer (in bytes)
4103 * @buffer: The ring buffer.
4104 */
4105 unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4106 {
4107 /*
4108 * Earlier, this method returned
4109 * BUF_PAGE_SIZE * buffer->nr_pages
4110 * Since the nr_pages field is now removed, we have converted this to
4111 * return the per cpu buffer value.
4112 */
4113 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4114 return 0;
4115
4116 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4117 }
4118 EXPORT_SYMBOL_GPL(ring_buffer_size);
4119
4120 static void
4121 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4122 {
4123 rb_head_page_deactivate(cpu_buffer);
4124
4125 cpu_buffer->head_page
4126 = list_entry(cpu_buffer->pages, struct buffer_page, list);
4127 local_set(&cpu_buffer->head_page->write, 0);
4128 local_set(&cpu_buffer->head_page->entries, 0);
4129 local_set(&cpu_buffer->head_page->page->commit, 0);
4130
4131 cpu_buffer->head_page->read = 0;
4132
4133 cpu_buffer->tail_page = cpu_buffer->head_page;
4134 cpu_buffer->commit_page = cpu_buffer->head_page;
4135
4136 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4137 INIT_LIST_HEAD(&cpu_buffer->new_pages);
4138 local_set(&cpu_buffer->reader_page->write, 0);
4139 local_set(&cpu_buffer->reader_page->entries, 0);
4140 local_set(&cpu_buffer->reader_page->page->commit, 0);
4141 cpu_buffer->reader_page->read = 0;
4142
4143 local_set(&cpu_buffer->entries_bytes, 0);
4144 local_set(&cpu_buffer->overrun, 0);
4145 local_set(&cpu_buffer->commit_overrun, 0);
4146 local_set(&cpu_buffer->dropped_events, 0);
4147 local_set(&cpu_buffer->entries, 0);
4148 local_set(&cpu_buffer->committing, 0);
4149 local_set(&cpu_buffer->commits, 0);
4150 cpu_buffer->read = 0;
4151 cpu_buffer->read_bytes = 0;
4152
4153 cpu_buffer->write_stamp = 0;
4154 cpu_buffer->read_stamp = 0;
4155
4156 cpu_buffer->lost_events = 0;
4157 cpu_buffer->last_overrun = 0;
4158
4159 rb_head_page_activate(cpu_buffer);
4160 }
4161
4162 /**
4163 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4164 * @buffer: The ring buffer to reset a per cpu buffer of
4165 * @cpu: The CPU buffer to be reset
4166 */
4167 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4168 {
4169 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4170 unsigned long flags;
4171
4172 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4173 return;
4174
4175 atomic_inc(&buffer->resize_disabled);
4176 atomic_inc(&cpu_buffer->record_disabled);
4177
4178 /* Make sure all commits have finished */
4179 synchronize_sched();
4180
4181 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4182
4183 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4184 goto out;
4185
4186 arch_spin_lock(&cpu_buffer->lock);
4187
4188 rb_reset_cpu(cpu_buffer);
4189
4190 arch_spin_unlock(&cpu_buffer->lock);
4191
4192 out:
4193 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4194
4195 atomic_dec(&cpu_buffer->record_disabled);
4196 atomic_dec(&buffer->resize_disabled);
4197 }
4198 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4199
4200 /**
4201 * ring_buffer_reset - reset a ring buffer
4202 * @buffer: The ring buffer to reset all cpu buffers
4203 */
4204 void ring_buffer_reset(struct ring_buffer *buffer)
4205 {
4206 int cpu;
4207
4208 for_each_buffer_cpu(buffer, cpu)
4209 ring_buffer_reset_cpu(buffer, cpu);
4210 }
4211 EXPORT_SYMBOL_GPL(ring_buffer_reset);
4212
4213 /**
4214 * ring_buffer_empty - is the ring buffer empty?
4215 * @buffer: The ring buffer to test
4216 */
4217 int ring_buffer_empty(struct ring_buffer *buffer)
4218 {
4219 struct ring_buffer_per_cpu *cpu_buffer;
4220 unsigned long flags;
4221 int dolock;
4222 int cpu;
4223 int ret;
4224
4225 dolock = rb_ok_to_lock();
4226
4227 /* yes this is racy, but if you don't like the race, lock the buffer */
4228 for_each_buffer_cpu(buffer, cpu) {
4229 cpu_buffer = buffer->buffers[cpu];
4230 local_irq_save(flags);
4231 if (dolock)
4232 raw_spin_lock(&cpu_buffer->reader_lock);
4233 ret = rb_per_cpu_empty(cpu_buffer);
4234 if (dolock)
4235 raw_spin_unlock(&cpu_buffer->reader_lock);
4236 local_irq_restore(flags);
4237
4238 if (!ret)
4239 return 0;
4240 }
4241
4242 return 1;
4243 }
4244 EXPORT_SYMBOL_GPL(ring_buffer_empty);
4245
4246 /**
4247 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4248 * @buffer: The ring buffer
4249 * @cpu: The CPU buffer to test
4250 */
4251 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4252 {
4253 struct ring_buffer_per_cpu *cpu_buffer;
4254 unsigned long flags;
4255 int dolock;
4256 int ret;
4257
4258 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4259 return 1;
4260
4261 dolock = rb_ok_to_lock();
4262
4263 cpu_buffer = buffer->buffers[cpu];
4264 local_irq_save(flags);
4265 if (dolock)
4266 raw_spin_lock(&cpu_buffer->reader_lock);
4267 ret = rb_per_cpu_empty(cpu_buffer);
4268 if (dolock)
4269 raw_spin_unlock(&cpu_buffer->reader_lock);
4270 local_irq_restore(flags);
4271
4272 return ret;
4273 }
4274 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
4275
4276 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4277 /**
4278 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4279 * @buffer_a: One buffer to swap with
4280 * @buffer_b: The other buffer to swap with
4281 *
4282 * This function is useful for tracers that want to take a "snapshot"
4283 * of a CPU buffer and have another backup buffer lying around.
4284 * It is expected that the tracer handles the cpu buffer not being
4285 * used at the moment.
4286 */
4287 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4288 struct ring_buffer *buffer_b, int cpu)
4289 {
4290 struct ring_buffer_per_cpu *cpu_buffer_a;
4291 struct ring_buffer_per_cpu *cpu_buffer_b;
4292 int ret = -EINVAL;
4293
4294 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4295 !cpumask_test_cpu(cpu, buffer_b->cpumask))
4296 goto out;
4297
4298 cpu_buffer_a = buffer_a->buffers[cpu];
4299 cpu_buffer_b = buffer_b->buffers[cpu];
4300
4301 /* At least make sure the two buffers are somewhat the same */
4302 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4303 goto out;
4304
4305 ret = -EAGAIN;
4306
4307 if (ring_buffer_flags != RB_BUFFERS_ON)
4308 goto out;
4309
4310 if (atomic_read(&buffer_a->record_disabled))
4311 goto out;
4312
4313 if (atomic_read(&buffer_b->record_disabled))
4314 goto out;
4315
4316 if (atomic_read(&cpu_buffer_a->record_disabled))
4317 goto out;
4318
4319 if (atomic_read(&cpu_buffer_b->record_disabled))
4320 goto out;
4321
4322 /*
4323 * We can't do a synchronize_sched here because this
4324 * function can be called in atomic context.
4325 * Normally this will be called from the same CPU as cpu.
4326 * If not it's up to the caller to protect this.
4327 */
4328 atomic_inc(&cpu_buffer_a->record_disabled);
4329 atomic_inc(&cpu_buffer_b->record_disabled);
4330
4331 ret = -EBUSY;
4332 if (local_read(&cpu_buffer_a->committing))
4333 goto out_dec;
4334 if (local_read(&cpu_buffer_b->committing))
4335 goto out_dec;
4336
4337 buffer_a->buffers[cpu] = cpu_buffer_b;
4338 buffer_b->buffers[cpu] = cpu_buffer_a;
4339
4340 cpu_buffer_b->buffer = buffer_a;
4341 cpu_buffer_a->buffer = buffer_b;
4342
4343 ret = 0;
4344
4345 out_dec:
4346 atomic_dec(&cpu_buffer_a->record_disabled);
4347 atomic_dec(&cpu_buffer_b->record_disabled);
4348 out:
4349 return ret;
4350 }
4351 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
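
/*
 * Illustrative sketch (not part of the original file): the typical
 * "snapshot" pattern, swapping the current CPU's buffer with a spare
 * buffer of the same per-CPU size. "main_buffer" and "snapshot_buffer"
 * are hypothetical and must have been allocated with matching sizes.
 *
 *	int cpu = raw_smp_processor_id();
 *	int err;
 *
 *	err = ring_buffer_swap_cpu(main_buffer, snapshot_buffer, cpu);
 *	if (err)
 *		return err;
 *
 * On failure err is -EINVAL, -EAGAIN or -EBUSY. On success, the events
 * that were in main_buffer's CPU buffer can be read from snapshot_buffer
 * while the writer continues into the previously spare pages.
 */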
4352 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
4353
4354 /**
4355 * ring_buffer_alloc_read_page - allocate a page to read from buffer
4356 * @buffer: the buffer to allocate for.
4357 * @cpu: the cpu buffer to allocate for.
4358 *
4359 * This function is used in conjunction with ring_buffer_read_page.
4360 * When reading a full page from the ring buffer, these functions
4361 * can be used to speed up the process. The calling function should
4362 * allocate a few pages first with this function. Then when it
4363 * needs to get pages from the ring buffer, it passes the result
4364 * of this function into ring_buffer_read_page, which will swap
4365 * the page that was allocated, with the read page of the buffer.
4366 *
4367 * Returns:
4368 * The page allocated, or NULL on error.
4369 */
4370 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4371 {
4372 struct buffer_data_page *bpage;
4373 struct page *page;
4374
4375 page = alloc_pages_node(cpu_to_node(cpu),
4376 GFP_KERNEL | __GFP_NORETRY, 0);
4377 if (!page)
4378 return NULL;
4379
4380 bpage = page_address(page);
4381
4382 rb_init_page(bpage);
4383
4384 return bpage;
4385 }
4386 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4387
4388 /**
4389 * ring_buffer_free_read_page - free an allocated read page
4390 * @buffer: the buffer the page was allocated for
4391 * @data: the page to free
4392 *
4393 * Free a page allocated from ring_buffer_alloc_read_page.
4394 */
4395 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
4396 {
4397 free_page((unsigned long)data);
4398 }
4399 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4400
4401 /**
4402 * ring_buffer_read_page - extract a page from the ring buffer
4403 * @buffer: buffer to extract from
4404 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4405 * @len: amount to extract
4406 * @cpu: the cpu of the buffer to extract
4407 * @full: should the extraction only happen when the page is full.
4408 *
4409 * This function will pull out a page from the ring buffer and consume it.
4410 * @data_page must be the address of the variable that was returned
4411 * from ring_buffer_alloc_read_page. This is because the page might be used
4412 * to swap with a page in the ring buffer.
4413 *
4414 * for example:
4415 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
4416 * if (!rpage)
4417 * return error;
4418 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4419 * if (ret >= 0)
4420 * process_page(rpage, ret);
4421 *
4422 * When @full is set, the function will not return the data unless
4423 * the writer is off the reader page.
4424 *
4425 * Note: it is up to the calling functions to handle sleeps and wakeups.
4426 * The ring buffer can be used anywhere in the kernel and can not
4427 * blindly call wake_up. The layer that uses the ring buffer must be
4428 * responsible for that.
4429 *
4430 * Returns:
4431 * >=0 if data has been transferred, returns the offset of consumed data.
4432 * <0 if no data has been transferred.
4433 */
4434 int ring_buffer_read_page(struct ring_buffer *buffer,
4435 void **data_page, size_t len, int cpu, int full)
4436 {
4437 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4438 struct ring_buffer_event *event;
4439 struct buffer_data_page *bpage;
4440 struct buffer_page *reader;
4441 unsigned long missed_events;
4442 unsigned long flags;
4443 unsigned int commit;
4444 unsigned int read;
4445 u64 save_timestamp;
4446 int ret = -1;
4447
4448 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4449 goto out;
4450
4451 /*
4452 * If len is not big enough to hold the page header, then
4453 * we can not copy anything.
4454 */
4455 if (len <= BUF_PAGE_HDR_SIZE)
4456 goto out;
4457
4458 len -= BUF_PAGE_HDR_SIZE;
4459
4460 if (!data_page)
4461 goto out;
4462
4463 bpage = *data_page;
4464 if (!bpage)
4465 goto out;
4466
4467 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4468
4469 reader = rb_get_reader_page(cpu_buffer);
4470 if (!reader)
4471 goto out_unlock;
4472
4473 event = rb_reader_event(cpu_buffer);
4474
4475 read = reader->read;
4476 commit = rb_page_commit(reader);
4477
4478 /* Check if any events were dropped */
4479 missed_events = cpu_buffer->lost_events;
4480
4481 /*
4482 * If this page has been partially read or
4483 * if len is not big enough to read the rest of the page or
4484 * a writer is still on the page, then
4485 * we must copy the data from the page to the buffer.
4486 * Otherwise, we can simply swap the page with the one passed in.
4487 */
4488 if (read || (len < (commit - read)) ||
4489 cpu_buffer->reader_page == cpu_buffer->commit_page) {
4490 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4491 unsigned int rpos = read;
4492 unsigned int pos = 0;
4493 unsigned int size;
4494
4495 if (full)
4496 goto out_unlock;
4497
4498 if (len > (commit - read))
4499 len = (commit - read);
4500
4501 /* Always keep the time extend and data together */
4502 size = rb_event_ts_length(event);
4503
4504 if (len < size)
4505 goto out_unlock;
4506
4507 /* save the current timestamp, since the user will need it */
4508 save_timestamp = cpu_buffer->read_stamp;
4509
4510 /* Need to copy one event at a time */
4511 do {
4512 /* We need the size of one event, because
4513 * rb_advance_reader only advances by one event,
4514 * whereas rb_event_ts_length may include the size of
4515 * one or two events.
4516 * We have already ensured there's enough space if this
4517 * is a time extend. */
4518 size = rb_event_length(event);
4519 memcpy(bpage->data + pos, rpage->data + rpos, size);
4520
4521 len -= size;
4522
4523 rb_advance_reader(cpu_buffer);
4524 rpos = reader->read;
4525 pos += size;
4526
4527 if (rpos >= commit)
4528 break;
4529
4530 event = rb_reader_event(cpu_buffer);
4531 /* Always keep the time extend and data together */
4532 size = rb_event_ts_length(event);
4533 } while (len >= size);
4534
4535 /* update bpage */
4536 local_set(&bpage->commit, pos);
4537 bpage->time_stamp = save_timestamp;
4538
4539 /* we copied everything to the beginning */
4540 read = 0;
4541 } else {
4542 /* update the entry counter */
4543 cpu_buffer->read += rb_page_entries(reader);
4544 cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4545
4546 /* swap the pages */
4547 rb_init_page(bpage);
4548 bpage = reader->page;
4549 reader->page = *data_page;
4550 local_set(&reader->write, 0);
4551 local_set(&reader->entries, 0);
4552 reader->read = 0;
4553 *data_page = bpage;
4554
4555 /*
4556 * Use the real_end for the data size.
4557 * This gives us a chance to store the lost events
4558 * on the page.
4559 */
4560 if (reader->real_end)
4561 local_set(&bpage->commit, reader->real_end);
4562 }
4563 ret = read;
4564
4565 cpu_buffer->lost_events = 0;
4566
4567 commit = local_read(&bpage->commit);
4568 /*
4569 * Set a flag in the commit field if we lost events
4570 */
4571 if (missed_events) {
4572 /* If there is room at the end of the page to save the
4573 * missed events, then record it there.
4574 */
4575 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4576 memcpy(&bpage->data[commit], &missed_events,
4577 sizeof(missed_events));
4578 local_add(RB_MISSED_STORED, &bpage->commit);
4579 commit += sizeof(missed_events);
4580 }
4581 local_add(RB_MISSED_EVENTS, &bpage->commit);
4582 }
4583
4584 /*
4585 * This page may be off to user land. Zero it out here.
4586 */
4587 if (commit < BUF_PAGE_SIZE)
4588 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4589
4590 out_unlock:
4591 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4592
4593 out:
4594 return ret;
4595 }
4596 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
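
/*
 * Illustrative sketch (not part of the original file): draining one CPU
 * with full-page reads, expanding on the snippet in the kernel-doc
 * above. "consume_page" is a hypothetical consumer of the returned
 * buffer_data_page contents.
 *
 *	void *rpage;
 *	int ret;
 *
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (!rpage)
 *		return -ENOMEM;
 *
 *	while ((ret = ring_buffer_read_page(buffer, &rpage,
 *					    PAGE_SIZE, cpu, 0)) >= 0)
 *		consume_page(rpage, ret);
 *
 *	ring_buffer_free_read_page(buffer, rpage);
 *
 * Always pass the address of rpage: when a whole page can be swapped
 * out of the ring buffer, the pointer is updated to the swapped page,
 * and that is the page that must eventually be freed.
 */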
4597
4598 #ifdef CONFIG_HOTPLUG_CPU
4599 static int rb_cpu_notify(struct notifier_block *self,
4600 unsigned long action, void *hcpu)
4601 {
4602 struct ring_buffer *buffer =
4603 container_of(self, struct ring_buffer, cpu_notify);
4604 long cpu = (long)hcpu;
4605 int cpu_i, nr_pages_same;
4606 unsigned int nr_pages;
4607
4608 switch (action) {
4609 case CPU_UP_PREPARE:
4610 case CPU_UP_PREPARE_FROZEN:
4611 if (cpumask_test_cpu(cpu, buffer->cpumask))
4612 return NOTIFY_OK;
4613
4614 nr_pages = 0;
4615 nr_pages_same = 1;
4616 /* check if all cpu sizes are same */
4617 for_each_buffer_cpu(buffer, cpu_i) {
4618 /* fill in the size from first enabled cpu */
4619 if (nr_pages == 0)
4620 nr_pages = buffer->buffers[cpu_i]->nr_pages;
4621 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4622 nr_pages_same = 0;
4623 break;
4624 }
4625 }
4626 /* allocate minimum pages, user can later expand it */
4627 if (!nr_pages_same)
4628 nr_pages = 2;
4629 buffer->buffers[cpu] =
4630 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4631 if (!buffer->buffers[cpu]) {
4632 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4633 cpu);
4634 return NOTIFY_OK;
4635 }
4636 smp_wmb();
4637 cpumask_set_cpu(cpu, buffer->cpumask);
4638 break;
4639 case CPU_DOWN_PREPARE:
4640 case CPU_DOWN_PREPARE_FROZEN:
4641 /*
4642 * Do nothing.
4643 * If we were to free the buffer, then the user would
4644 * lose any trace that was in the buffer.
4645 */
4646 break;
4647 default:
4648 break;
4649 }
4650 return NOTIFY_OK;
4651 }
4652 #endif
4653
4654 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
4655 /*
4656 * This is a basic integrity check of the ring buffer.
4657 * Late in the boot cycle this test will run when configured in.
4658 * It will kick off a thread per CPU that will go into a loop
4659 * writing to the per cpu ring buffer various sizes of data.
4660 * Some of the data will be large items, some small.
4661 *
4662 * Another thread is created that goes into a spin, sending out
4663 * IPIs to the other CPUs to also write into the ring buffer.
4664 * This is to test the nesting ability of the buffer.
4665 *
4666 * Basic stats are recorded and reported. If something in the
4667 * ring buffer should happen that's not expected, a big warning
4668 * is displayed and all ring buffers are disabled.
4669 */
4670 static struct task_struct *rb_threads[NR_CPUS] __initdata;
4671
4672 struct rb_test_data {
4673 struct ring_buffer *buffer;
4674 unsigned long events;
4675 unsigned long bytes_written;
4676 unsigned long bytes_alloc;
4677 unsigned long bytes_dropped;
4678 unsigned long events_nested;
4679 unsigned long bytes_written_nested;
4680 unsigned long bytes_alloc_nested;
4681 unsigned long bytes_dropped_nested;
4682 int min_size_nested;
4683 int max_size_nested;
4684 int max_size;
4685 int min_size;
4686 int cpu;
4687 int cnt;
4688 };
4689
4690 static struct rb_test_data rb_data[NR_CPUS] __initdata;
4691
4692 /* 1 meg per cpu */
4693 #define RB_TEST_BUFFER_SIZE 1048576
4694
4695 static char rb_string[] __initdata =
4696 "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4697 "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4698 "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4699
4700 static bool rb_test_started __initdata;
4701
4702 struct rb_item {
4703 int size;
4704 char str[];
4705 };
4706
4707 static __init int rb_write_something(struct rb_test_data *data, bool nested)
4708 {
4709 struct ring_buffer_event *event;
4710 struct rb_item *item;
4711 bool started;
4712 int event_len;
4713 int size;
4714 int len;
4715 int cnt;
4716
4717 /* Have nested writes different than what is written */
4718 cnt = data->cnt + (nested ? 27 : 0);
4719
4720 /* Multiply cnt by ~e, to make some unique increment */
4721 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
4722
4723 len = size + sizeof(struct rb_item);
4724
4725 started = rb_test_started;
4726 /* read rb_test_started before checking buffer enabled */
4727 smp_rmb();
4728
4729 event = ring_buffer_lock_reserve(data->buffer, len);
4730 if (!event) {
4731 /* Ignore dropped events before test starts. */
4732 if (started) {
4733 if (nested)
4734 data->bytes_dropped_nested += len;
4735 else
4736 data->bytes_dropped += len;
4737 }
4738 return len;
4739 }
4740
4741 event_len = ring_buffer_event_length(event);
4742
4743 if (RB_WARN_ON(data->buffer, event_len < len))
4744 goto out;
4745
4746 item = ring_buffer_event_data(event);
4747 item->size = size;
4748 memcpy(item->str, rb_string, size);
4749
4750 if (nested) {
4751 data->bytes_alloc_nested += event_len;
4752 data->bytes_written_nested += len;
4753 data->events_nested++;
4754 if (!data->min_size_nested || len < data->min_size_nested)
4755 data->min_size_nested = len;
4756 if (len > data->max_size_nested)
4757 data->max_size_nested = len;
4758 } else {
4759 data->bytes_alloc += event_len;
4760 data->bytes_written += len;
4761 data->events++;
4762 if (!data->min_size || len < data->min_size)
4763 data->min_size = len;
4764 if (len > data->max_size)
4765 data->max_size = len;
4766 }
4767
4768 out:
4769 ring_buffer_unlock_commit(data->buffer, event);
4770
4771 return 0;
4772 }
4773
4774 static __init int rb_test(void *arg)
4775 {
4776 struct rb_test_data *data = arg;
4777
4778 while (!kthread_should_stop()) {
4779 rb_write_something(data, false);
4780 data->cnt++;
4781
4782 set_current_state(TASK_INTERRUPTIBLE);
4783 /* Now sleep between a min of 100-300us and a max of 1ms */
4784 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
4785 }
4786
4787 return 0;
4788 }
4789
4790 static __init void rb_ipi(void *ignore)
4791 {
4792 struct rb_test_data *data;
4793 int cpu = smp_processor_id();
4794
4795 data = &rb_data[cpu];
4796 rb_write_something(data, true);
4797 }
4798
4799 static __init int rb_hammer_test(void *arg)
4800 {
4801 while (!kthread_should_stop()) {
4802
4803 /* Send an IPI to all cpus to write data! */
4804 smp_call_function(rb_ipi, NULL, 1);
4805 /* No sleep, but for non preempt, let others run */
4806 schedule();
4807 }
4808
4809 return 0;
4810 }
4811
4812 static __init int test_ringbuffer(void)
4813 {
4814 struct task_struct *rb_hammer;
4815 struct ring_buffer *buffer;
4816 int cpu;
4817 int ret = 0;
4818
4819 pr_info("Running ring buffer tests...\n");
4820
4821 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
4822 if (WARN_ON(!buffer))
4823 return 0;
4824
4825 /* Disable buffer so that threads can't write to it yet */
4826 ring_buffer_record_off(buffer);
4827
4828 for_each_online_cpu(cpu) {
4829 rb_data[cpu].buffer = buffer;
4830 rb_data[cpu].cpu = cpu;
4831 rb_data[cpu].cnt = cpu;
4832 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
4833 "rbtester/%d", cpu);
4834 if (WARN_ON(!rb_threads[cpu])) {
4835 pr_cont("FAILED\n");
4836 ret = -1;
4837 goto out_free;
4838 }
4839
4840 kthread_bind(rb_threads[cpu], cpu);
4841 wake_up_process(rb_threads[cpu]);
4842 }
4843
4844 /* Now create the rb hammer! */
4845 rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
4846 if (WARN_ON(!rb_hammer)) {
4847 pr_cont("FAILED\n");
4848 ret = -1;
4849 goto out_free;
4850 }
4851
4852 ring_buffer_record_on(buffer);
4853 /*
4854 * Show buffer is enabled before setting rb_test_started.
4855 * Yes there's a small race window where events could be
4856 * dropped and the thread won't catch it. But when a ring
4857 * buffer gets enabled, there will always be some kind of
4858 * delay before other CPUs see it. Thus, we don't care about
4859 * those dropped events. We care about events dropped after
4860 * the threads see that the buffer is active.
4861 */
4862 smp_wmb();
4863 rb_test_started = true;
4864
4865 set_current_state(TASK_INTERRUPTIBLE);
4866 /* Just run for 10 seconds */
4867 schedule_timeout(10 * HZ);
4868
4869 kthread_stop(rb_hammer);
4870
4871 out_free:
4872 for_each_online_cpu(cpu) {
4873 if (!rb_threads[cpu])
4874 break;
4875 kthread_stop(rb_threads[cpu]);
4876 }
4877 if (ret) {
4878 ring_buffer_free(buffer);
4879 return ret;
4880 }
4881
4882 /* Report! */
4883 pr_info("finished\n");
4884 for_each_online_cpu(cpu) {
4885 struct ring_buffer_event *event;
4886 struct rb_test_data *data = &rb_data[cpu];
4887 struct rb_item *item;
4888 unsigned long total_events;
4889 unsigned long total_dropped;
4890 unsigned long total_written;
4891 unsigned long total_alloc;
4892 unsigned long total_read = 0;
4893 unsigned long total_size = 0;
4894 unsigned long total_len = 0;
4895 unsigned long total_lost = 0;
4896 unsigned long lost;
4897 int big_event_size;
4898 int small_event_size;
4899
4900 ret = -1;
4901
4902 total_events = data->events + data->events_nested;
4903 total_written = data->bytes_written + data->bytes_written_nested;
4904 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
4905 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
4906
4907 big_event_size = data->max_size + data->max_size_nested;
4908 small_event_size = data->min_size + data->min_size_nested;
4909
4910 pr_info("CPU %d:\n", cpu);
4911 pr_info(" events: %ld\n", total_events);
4912 pr_info(" dropped bytes: %ld\n", total_dropped);
4913 pr_info(" alloced bytes: %ld\n", total_alloc);
4914 pr_info(" written bytes: %ld\n", total_written);
4915 pr_info(" biggest event: %d\n", big_event_size);
4916 pr_info(" smallest event: %d\n", small_event_size);
4917
4918 if (RB_WARN_ON(buffer, total_dropped))
4919 break;
4920
4921 ret = 0;
4922
4923 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
4924 total_lost += lost;
4925 item = ring_buffer_event_data(event);
4926 total_len += ring_buffer_event_length(event);
4927 total_size += item->size + sizeof(struct rb_item);
4928 if (memcmp(&item->str[0], rb_string, item->size) != 0) {
4929 pr_info("FAILED!\n");
4930 pr_info("buffer had: %.*s\n", item->size, item->str);
4931 pr_info("expected: %.*s\n", item->size, rb_string);
4932 RB_WARN_ON(buffer, 1);
4933 ret = -1;
4934 break;
4935 }
4936 total_read++;
4937 }
4938 if (ret)
4939 break;
4940
4941 ret = -1;
4942
4943 pr_info(" read events: %ld\n", total_read);
4944 pr_info(" lost events: %ld\n", total_lost);
4945 pr_info(" total events: %ld\n", total_lost + total_read);
4946 pr_info(" recorded len bytes: %ld\n", total_len);
4947 pr_info(" recorded size bytes: %ld\n", total_size);
4948 if (total_lost)
4949 pr_info(" With dropped events, record len and size may not match\n"
4950 " alloced and written from above\n");
4951 if (!total_lost) {
4952 if (RB_WARN_ON(buffer, total_len != total_alloc ||
4953 total_size != total_written))
4954 break;
4955 }
4956 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
4957 break;
4958
4959 ret = 0;
4960 }
4961 if (!ret)
4962 pr_info("Ring buffer PASSED!\n");
4963
4964 ring_buffer_free(buffer);
4965 return 0;
4966 }
4967
4968 late_initcall(test_ringbuffer);
4969 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */