/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/sched.h>	/* used for sched_clock() (for now) */
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/fs.h>

#include "trace.h"

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

/* FIXME!!! */
u64 ring_buffer_time_stamp(int cpu)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return sched_clock() << DEBUG_SHIFT;
}

void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}

#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
#define RB_ALIGNMENT_SHIFT	2
#define RB_ALIGNMENT		(1 << RB_ALIGNMENT_SHIFT)
#define RB_MAX_SMALL_DATA	28

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

/* inline for ring buffer fast paths */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		/* undefined */
		return -1;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		if (event->len)
			length = event->len << RB_ALIGNMENT_SHIFT;
		else
			length = event->array[0];
		return length + RB_EVNT_HDR_SIZE;
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	return rb_event_length(event);
}

/* inline for ring buffer fast paths */
static inline void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type != RINGBUF_TYPE_DATA);
	/* If length is in len field, then array[0] has the data */
	if (event->len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
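
/*
 * Layout note with a worked example (illustrative, not part of the
 * original source): with the 4-byte event header, a 12-byte payload
 * fits in the len field (len = 12 >> RB_ALIGNMENT_SHIFT = 3) and the
 * data starts at array[0].  A 40-byte payload exceeds
 * RB_MAX_SMALL_DATA (28), so len is set to 0, array[0] holds the
 * length 40, and the data starts at array[1].
 */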

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu_mask(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/*
 * This hack stolen from mm/slob.c.
 * We can store per page timing information in the page frame of the page.
 * Thanks to Peter Zijlstra for suggesting this idea.
 */
struct buffer_page {
	u64 time_stamp;		/* page time stamp */
	local_t write;		/* index for next write */
	local_t commit;		/* write committed index */
	unsigned read;		/* index for next read */
	struct list_head list;	/* list of free pages */
	void *page;		/* Actual data page */
};

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static inline void free_buffer_page(struct buffer_page *bpage)
{
	if (bpage->page)
		free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}
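
/*
 * Worked example (illustrative): with a nanosecond clock, TS_SHIFT = 27
 * means a delta fits as long as it is below 1 << 27 = 134217728 ns,
 * i.e. roughly 134 ms.  A delta of 200 ms (200000000 ns) has bits set
 * above bit 26, so test_time_stamp() returns 1 and a time extend event
 * has to be inserted before the data event.
 */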

#define BUF_PAGE_SIZE PAGE_SIZE

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int cpu;
	struct ring_buffer *buffer;
	raw_spinlock_t lock;
	struct lock_class_key lock_key;
	struct list_head pages;
	struct buffer_page *head_page;	/* read from head */
	struct buffer_page *tail_page;	/* write to tail */
	struct buffer_page *commit_page;	/* committed pages */
	struct buffer_page *reader_page;
	unsigned long overrun;
	unsigned long entries;
	u64 write_stamp;
	u64 read_stamp;
	atomic_t record_disabled;
};

struct ring_buffer {
	unsigned long size;
	unsigned pages;
	unsigned flags;
	int cpus;
	cpumask_t cpumask;
	atomic_t record_disabled;

	struct mutex mutex;

	struct ring_buffer_per_cpu **buffers;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long head;
	struct buffer_page *head_page;
	u64 read_stamp;
};

#define RB_WARN_ON(buffer, cond)				\
	do {							\
		if (unlikely(cond)) {				\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
	} while (0)

#define RB_WARN_ON_RET(buffer, cond)				\
	do {							\
		if (unlikely(cond)) {				\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
			return -1;				\
		}						\
	} while (0)

#define RB_WARN_ON_ONCE(buffer, cond)				\
	do {							\
		static int once;				\
		if (unlikely(cond) && !once) {			\
			once++;					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
	} while (0)

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;

	RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
	RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);

	list_for_each_entry_safe(page, tmp, head, list) {
		RB_WARN_ON_RET(cpu_buffer,
			       page->list.next->prev != &page->list);
		RB_WARN_ON_RET(cpu_buffer,
			       page->list.prev->next != &page->list);
	}

	return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!page)
			goto free_pages;
		list_add(&page->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		page->page = (void *)addr;
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *page;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&cpu_buffer->pages);

	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!page)
		goto fail_free_buffer;

	cpu_buffer->reader_page = page;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	page->page = (void *)addr;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;

	list_del_init(&cpu_buffer->reader_page->list);
	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(page, tmp, head, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	buffer->cpumask = cpu_possible_map;
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_buffer;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
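
/*
 * Usage sketch (illustrative, not part of the original file): a 1 MB
 * buffer that overwrites the oldest pages when full.  The size is
 * rounded up to whole pages by ring_buffer_alloc() itself.
 */
static struct ring_buffer * __maybe_unused example_alloc_buffer(void)
{
	/* NULL on allocation failure, as with the per-cpu page setup */
	return ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
}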

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer);
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *page;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		BUG_ON(list_empty(&cpu_buffer->pages));
		p = cpu_buffer->pages.next;
		page = list_entry(p, struct buffer_page, list);
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	BUG_ON(list_empty(&cpu_buffer->pages));

	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *page;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		BUG_ON(list_empty(pages));
		p = pages->next;
		page = list_entry(p, struct buffer_page, list);
		list_del_init(&page->list);
		list_add_tail(&page->list, &cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -ENOMEM on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *page, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		BUG_ON(nr_pages >= buffer->pages);

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * linked list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM.
	 */
	BUG_ON(nr_pages <= buffer->pages);
	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			page = kzalloc_node(ALIGN(sizeof(*page),
						  cache_line_size()),
					    GFP_KERNEL, cpu_to_node(cpu));
			if (!page)
				goto free_pages;
			list_add(&page->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			page->page = (void *)addr;
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	BUG_ON(!list_empty(&pages));

 out:
	buffer->pages = nr_pages;
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	return -ENOMEM;
}

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type == RINGBUF_TYPE_PADDING;
}

static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
{
	return page->page + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->head_page,
			       cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->head_page);
}

/*
 * When the tail hits the head and the buffer is in overwrite mode,
 * the head jumps to the next page and all content on the previous
 * page is discarded. But before doing so, we update the overrun
 * variable of the buffer.
 */
static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	unsigned long head;

	for (head = 0; head < rb_head_size(cpu_buffer);
	     head += rb_event_length(event)) {

		event = __rb_page_index(cpu_buffer->head_page, head);
		BUG_ON(rb_null_event(event));
		/* Only count data entries */
		if (event->type != RINGBUF_TYPE_DATA)
			continue;
		cpu_buffer->overrun++;
		cpu_buffer->entries--;
	}
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **page)
{
	struct list_head *p = (*page)->list.next;

	if (p == &cpu_buffer->pages)
		p = p->next;

	*page = list_entry(p, struct buffer_page, list);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}

static inline int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
	     struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static inline void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
		    struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	while (cpu_buffer->commit_page->page != (void *)addr) {
		RB_WARN_ON(cpu_buffer,
			   cpu_buffer->commit_page == cpu_buffer->tail_page);
		cpu_buffer->commit_page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
	}

	/* Now set the commit to the event's index */
	local_set(&cpu_buffer->commit_page->commit, index);
}

static inline void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		cpu_buffer->commit_page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		cpu_buffer->commit_page->commit =
			cpu_buffer->commit_page->write;
		barrier();
	}
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static inline void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = cpu_buffer->head_page;
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->time_stamp;
	iter->head = 0;
}

/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static inline void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
		break;

	case RINGBUF_TYPE_TIME_EXTEND:
		event->len =
			(RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_TIME_STAMP:
		event->len =
			(RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_DATA:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA) {
			event->len = 0;
			event->array[0] = length;
		} else
			event->len =
				(length + (RB_ALIGNMENT-1))
				>> RB_ALIGNMENT_SHIFT;
		break;
	default:
		BUG();
	}
}

static inline unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}
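
/*
 * Worked example (illustrative): a request for 10 bytes of data is at
 * most RB_MAX_SMALL_DATA (28), so no extra length word is needed:
 * 10 + RB_EVNT_HDR_SIZE (4) = 14, which ALIGN()s up to 16.  A request
 * for 40 bytes is larger than 28, so sizeof(event.array[0]) (4) is
 * added first: 40 + 4 + 4 = 48, which is already aligned.
 */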

static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page, *head_page, *reader_page;
	unsigned long tail, write;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE) {
		struct buffer_page *next_page = tail_page;

		local_irq_save(flags);
		__raw_spin_lock(&cpu_buffer->lock);

		rb_inc_page(cpu_buffer, &next_page);

		head_page = cpu_buffer->head_page;
		reader_page = cpu_buffer->reader_page;

		/* we grabbed the lock before incrementing */
		RB_WARN_ON(cpu_buffer, next_page == reader_page);

		/*
		 * If for some reason, we had an interrupt storm that made
		 * it all the way around the buffer, bail, and warn
		 * about it.
		 */
		if (unlikely(next_page == cpu_buffer->commit_page)) {
			WARN_ON_ONCE(1);
			goto out_unlock;
		}

		if (next_page == head_page) {
			if (!(buffer->flags & RB_FL_OVERWRITE)) {
				/* reset write */
				if (tail <= BUF_PAGE_SIZE)
					local_set(&tail_page->write, tail);
				goto out_unlock;
			}

			/* tail_page has not moved yet? */
			if (tail_page == cpu_buffer->tail_page) {
				/* count overflows */
				rb_update_overflow(cpu_buffer);

				rb_inc_page(cpu_buffer, &head_page);
				cpu_buffer->head_page = head_page;
				cpu_buffer->head_page->read = 0;
			}
		}

		/*
		 * If the tail page is still the same as what we think
		 * it is, then it is up to us to update the tail
		 * pointer.
		 */
		if (tail_page == cpu_buffer->tail_page) {
			local_set(&next_page->write, 0);
			local_set(&next_page->commit, 0);
			cpu_buffer->tail_page = next_page;

			/* reread the time stamp */
			*ts = ring_buffer_time_stamp(cpu_buffer->cpu);
			cpu_buffer->tail_page->time_stamp = *ts;
		}

		/*
		 * The actual tail page has moved forward.
		 */
		if (tail < BUF_PAGE_SIZE) {
			/* Mark the rest of the page with padding */
			event = __rb_page_index(tail_page, tail);
			event->type = RINGBUF_TYPE_PADDING;
		}

		if (tail <= BUF_PAGE_SIZE)
			/* Set the write back to the previous setting */
			local_set(&tail_page->write, tail);

		/*
		 * If this was a commit entry that failed,
		 * increment that too
		 */
		if (tail_page == cpu_buffer->commit_page &&
		    tail == rb_commit_index(cpu_buffer)) {
			rb_set_commit_to_write(cpu_buffer);
		}

		__raw_spin_unlock(&cpu_buffer->lock);
		local_irq_restore(flags);

		/* fail and let the caller try again */
		return ERR_PTR(-EAGAIN);
	}

	/* We reserved something on the buffer */

	BUG_ON(write > BUF_PAGE_SIZE);

	event = __rb_page_index(tail_page, tail);
	rb_update_event(event, type, length);

	/*
	 * If this is a commit and the tail is zero, then update
	 * this page's time stamp.
	 */
	if (!tail && rb_is_commit(cpu_buffer, event))
		cpu_buffer->commit_page->time_stamp = *ts;

	return event;

 out_unlock:
	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);
	return NULL;
}

static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;
	int ret;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       (unsigned long long)*delta,
		       (unsigned long long)*ts,
		       (unsigned long long)cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big, we need to add a
	 * new timestamp.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -EBUSY;

	if (PTR_ERR(event) == -EAGAIN)
		return -EAGAIN;

	/* Only a committed time event can update the write stamp */
	if (rb_is_commit(cpu_buffer, event)) {
		/*
		 * If this is the first on the page, then we need to
		 * update the page itself, and just put in a zero.
		 */
		if (rb_event_index(event)) {
			event->time_delta = *delta & TS_MASK;
			event->array[0] = *delta >> TS_SHIFT;
		} else {
			cpu_buffer->commit_page->time_stamp = *ts;
			event->time_delta = 0;
			event->array[0] = 0;
		}
		cpu_buffer->write_stamp = *ts;
		/* let the caller know this was the commit */
		ret = 1;
	} else {
		/* Darn, this is just wasted space */
		event->time_delta = 0;
		event->array[0] = 0;
		ret = 0;
	}

	*delta = 0;

	return ret;
}

static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned type, unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta;
	int commit = 0;
	int nr_loops = 0;

 again:
	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (unlikely(++nr_loops > 1000)) {
		RB_WARN_ON(cpu_buffer, 1);
		return NULL;
	}

	ts = ring_buffer_time_stamp(cpu_buffer->cpu);

	/*
	 * Only the first commit can update the timestamp.
	 * Yes there is a race here. If an interrupt comes in
	 * just after the conditional and it traces too, then it
	 * will also check the deltas. More than one timestamp may
	 * also be made. But only the entry that did the actual
	 * commit will be something other than zero.
	 */
	if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
	    rb_page_write(cpu_buffer->tail_page) ==
	    rb_commit_index(cpu_buffer)) {

		delta = ts - cpu_buffer->write_stamp;

		/* make sure this delta is calculated here */
		barrier();

		/* Did the write stamp get updated already? */
		if (unlikely(ts < cpu_buffer->write_stamp))
			delta = 0;

		if (test_time_stamp(delta)) {

			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);

			if (commit == -EBUSY)
				return NULL;

			if (commit == -EAGAIN)
				goto again;

			RB_WARN_ON(cpu_buffer, commit < 0);
		}
	} else
		/* Non commits have zero deltas */
		delta = 0;

	event = __rb_reserve_next(cpu_buffer, type, length, &ts);
	if (PTR_ERR(event) == -EAGAIN)
		goto again;

	if (!event) {
		if (unlikely(commit))
			/*
			 * Ouch! We needed a timestamp and it was committed. But
			 * we didn't get our event reserved.
			 */
			rb_set_commit_to_write(cpu_buffer);
		return NULL;
	}

	/*
	 * If the timestamp was committed, make the commit our entry
	 * now so that we will update it when needed.
	 */
	if (commit)
		rb_set_commit_event(cpu_buffer, event);
	else if (!rb_is_commit(cpu_buffer, event))
		delta = 0;

	event->time_delta = delta;

	return event;
}

static DEFINE_PER_CPU(int, rb_need_resched);

/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 * @flags: a pointer to save the interrupt flags
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer,
			 unsigned long length,
			 unsigned long *flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu, resched;

	if (atomic_read(&buffer->record_disabled))
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	length = rb_calculate_event_length(length);
	if (length > BUF_PAGE_SIZE)
		goto out;

	event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
	if (!event)
		goto out;

	/*
	 * Need to store resched state on this cpu.
	 * Only the first needs to.
	 */

	if (preempt_count() == 1)
		per_cpu(rb_need_resched, cpu) = resched;

	return event;

 out:
	ftrace_preempt_enable(resched);
	return NULL;
}

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	cpu_buffer->entries++;

	/* Only process further if we own the commit */
	if (!rb_is_commit(cpu_buffer, event))
		return;

	cpu_buffer->write_stamp += event->time_delta;

	rb_set_commit_to_write(cpu_buffer);
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event,
			      unsigned long flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

	return 0;
}
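
/*
 * Usage sketch (illustrative, not part of the original file): the
 * reserve/commit pair.  "struct example_payload" is hypothetical.
 */
struct example_payload {
	int cpu;
	u64 when;
};

static int __maybe_unused example_reserve_commit(struct ring_buffer *buffer)
{
	struct ring_buffer_event *event;
	struct example_payload *p;
	unsigned long flags;

	event = ring_buffer_lock_reserve(buffer, sizeof(*p), &flags);
	if (!event)
		return -EBUSY;	/* recording disabled or event too big */

	/* fill in the reserved body before committing it */
	p = ring_buffer_event_data(event);
	p->cpu = raw_smp_processor_id();
	p->when = ring_buffer_time_stamp(p->cpu);

	return ring_buffer_unlock_commit(buffer, event, flags);
}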

/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long event_length;
	void *body;
	int ret = -EBUSY;
	int cpu, resched;

	if (atomic_read(&buffer->record_disabled))
		return -EBUSY;

	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	event_length = rb_calculate_event_length(length);
	event = rb_reserve_next_event(cpu_buffer,
				      RINGBUF_TYPE_DATA, event_length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
 out:
	ftrace_preempt_enable(resched);

	return ret;
}
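
/*
 * Usage sketch (illustrative): when the data already exists in a local
 * buffer, ring_buffer_write() replaces the reserve/commit pair above.
 */
static int __maybe_unused example_write(struct ring_buffer *buffer)
{
	char msg[] = "hello";

	/* 0 on success, -EBUSY if recording is disabled */
	return ring_buffer_write(buffer, sizeof(msg), msg);
}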

static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = cpu_buffer->head_page;
	struct buffer_page *commit = cpu_buffer->commit_page;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}

/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}

/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	return cpu_buffer->entries;
}

/**
 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	return cpu_buffer->overrun;
}

/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += cpu_buffer->entries;
	}

	return entries;
}

/**
 * ring_buffer_overruns - get the number of overruns in the buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += cpu_buffer->overrun;
	}

	return overruns;
}

/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	if (list_empty(&cpu_buffer->reader_page->list)) {
		iter->head_page = cpu_buffer->head_page;
		iter->head = cpu_buffer->head_page->read;
	} else {
		iter->head_page = cpu_buffer->reader_page;
		iter->head = cpu_buffer->reader_page->read;
	}
	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->time_stamp;
}

/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}

static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		     struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		cpu_buffer->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		cpu_buffer->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = NULL;
	unsigned long flags;
	int nr_loops = 0;

	local_irq_save(flags);
	__raw_spin_lock(&cpu_buffer->lock);

 again:
	/*
	 * This should normally only loop twice. But because the
	 * start of the reader inserts an empty page, it causes
	 * a case where we will loop three times. There should be no
	 * reason to loop four times (that I know of).
	 */
	if (unlikely(++nr_loops > 3)) {
		RB_WARN_ON(cpu_buffer, 1);
		reader = NULL;
		goto out;
	}

	reader = cpu_buffer->reader_page;

	/* If there's more to read, return this page */
	if (cpu_buffer->reader_page->read < rb_page_size(reader))
		goto out;

	/* Never should we have an index greater than the size */
	RB_WARN_ON(cpu_buffer,
		   cpu_buffer->reader_page->read > rb_page_size(reader));

	/* check if we caught up to the tail */
	reader = NULL;
	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
		goto out;

	/*
	 * Splice the empty reader page into the list around the head.
	 * Reset the reader page to size zero.
	 */

	reader = cpu_buffer->head_page;
	cpu_buffer->reader_page->list.next = reader->list.next;
	cpu_buffer->reader_page->list.prev = reader->list.prev;

	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->commit, 0);

	/* Make the reader page now replace the head */
	reader->list.prev->next = &cpu_buffer->reader_page->list;
	reader->list.next->prev = &cpu_buffer->reader_page->list;

	/*
	 * If the tail is on the reader, then we must set the head
	 * to the inserted page, otherwise we set it one before.
	 */
	cpu_buffer->head_page = cpu_buffer->reader_page;

	if (cpu_buffer->commit_page != reader)
		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

	/* Finally update the reader page to the new head */
	cpu_buffer->reader_page = reader;
	rb_reset_reader_page(cpu_buffer);

	goto again;

 out:
	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	return reader;
}

static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	BUG_ON(!reader);

	event = rb_reader_event(cpu_buffer);

	if (event->type == RINGBUF_TYPE_DATA)
		cpu_buffer->entries--;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader_page->read += length;
}

static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		BUG_ON(iter->head_page == cpu_buffer->commit_page);
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
	       (iter->head + length > rb_commit_index(cpu_buffer)));

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_advance_iter(iter);
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written. The max times
	 * that this can happen is the number of nested interrupts we
	 * can have. Nesting 10 deep of interrupts is clearly
	 * an anomaly.
	 */
	if (unlikely(++nr_loops > 10)) {
		RB_WARN_ON(cpu_buffer, 1);
		return NULL;
	}

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		RB_WARN_ON(cpu_buffer, 1);
		rb_advance_reader(cpu_buffer);
		return NULL;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	if (ring_buffer_iter_empty(iter))
		return NULL;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written. The max times
	 * that this can happen is the number of nested interrupts we
	 * can have. Nesting 10 deep of interrupts is clearly
	 * an anomaly.
	 */
	if (unlikely(++nr_loops > 10)) {
		RB_WARN_ON(cpu_buffer, 1);
		return NULL;
	}

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	event = rb_iter_head_event(iter);

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		rb_inc_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: The CPU buffer to consume from
 * @ts: The timestamp counter of the event returned
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	event = ring_buffer_peek(buffer, cpu, ts);
	if (!event)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];
	rb_advance_reader(cpu_buffer);

	return event;
}
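
/*
 * Usage sketch (illustrative, not part of the original file): draining
 * one CPU's events.  Each call consumes the event it returns, so the
 * loop ends once the reader has caught up with the producer.
 */
static void __maybe_unused example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
		void *body = ring_buffer_event_data(event);
		/* note: this length includes the event header */
		unsigned len = ring_buffer_event_length(event);

		/* process the event body here; stamped with "ts" */
		(void)body;
		(void)len;
	}
}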

/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;
	unsigned long flags;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	local_irq_save(flags);
	__raw_spin_lock(&cpu_buffer->lock);
	ring_buffer_iter_reset(iter);
	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	return iter;
}

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;

	event = ring_buffer_iter_peek(iter, ts);
	if (!event)
		return NULL;

	rb_advance_iter(iter);

	return event;
}
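
/*
 * Usage sketch (illustrative): a non-consuming pass over one CPU
 * buffer.  Recording stays disabled between read_start and
 * read_finish, so this suits post-mortem style dumps.
 */
static void __maybe_unused example_iterate_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts)))
		; /* each event is visited once; nothing is consumed */

	ring_buffer_read_finish(iter);
}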

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	cpu_buffer->overrun = 0;
	cpu_buffer->entries = 0;
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	local_irq_save(flags);
	__raw_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);
}

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		if (!rb_per_cpu_empty(cpu_buffer))
			return 0;
	}
	return 1;
}

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 1;

	cpu_buffer = buffer->buffers[cpu];
	return rb_per_cpu_empty(cpu_buffer);
}

/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;

	if (!cpu_isset(cpu, buffer_a->cpumask) ||
	    !cpu_isset(cpu, buffer_b->cpumask))
		return -EINVAL;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->size != buffer_b->size ||
	    buffer_a->pages != buffer_b->pages)
		return -EINVAL;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);

	return 0;
}
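
/*
 * Usage sketch (illustrative, not part of the original file): the
 * snapshot pattern ring_buffer_swap_cpu() exists for.  "live" keeps
 * recording while "spare" holds the frozen copy; keeping the spare
 * buffer otherwise unused is the caller's job.
 */
static int __maybe_unused example_snapshot_cpu(struct ring_buffer *live,
					       struct ring_buffer *spare,
					       int cpu)
{
	int ret;

	ret = ring_buffer_swap_cpu(live, spare, cpu);
	if (ret)
		return ret;	/* -EINVAL: sizes or cpumasks differ */

	/* "spare" now holds cpu's old data and can be read at leisure */
	return 0;
}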