Merge branches 'tracing/fastboot', 'tracing/ftrace' and 'tracing/urgent' into tracing...
author Ingo Molnar <mingo@elte.hu>
Sun, 16 Nov 2008 06:28:46 +0000 (07:28 +0100)
committer Ingo Molnar <mingo@elte.hu>
Sun, 16 Nov 2008 06:28:46 +0000 (07:28 +0100)
kernel/trace/ring_buffer.c

index 2d6c2cf0c3bcc7087a77bd3d85e2e7640d18f9be,2d6c2cf0c3bcc7087a77bd3d85e2e7640d18f9be,036456cbb4f7e1ceecf929e37ed6de581866abda..caa4fda50f8a78e95a64a6a7572474bb0233793c
@@@@ -45,8 -45,8 -45,6 +45,8 @@@@ void tracing_off(void
        ring_buffers_off = 1;
   }
   
  +#include "trace.h"
  +
   /* Up this if you want to test the TIME_EXTENTS and normalization */
   #define DEBUG_SHIFT 0
   
@@@@ -189,8 -189,8 -187,7 +189,8 @@@@ static inline int test_time_stamp(u64 d
   struct ring_buffer_per_cpu {
        int                             cpu;
        struct ring_buffer              *buffer;
  -     spinlock_t                      lock;
  +     spinlock_t                      reader_lock; /* serialize readers */
  +     raw_spinlock_t                  lock;
        struct lock_class_key           lock_key;
        struct list_head                pages;
        struct buffer_page              *head_page;     /* read from head */
@@@@ -224,16 -224,16 -221,32 +224,16 @@@@ struct ring_buffer_iter 
        u64                             read_stamp;
   };
   
  +/* buffer may be either ring_buffer or ring_buffer_per_cpu */
   #define RB_WARN_ON(buffer, cond)                             \
  -     do {                                                    \
  -             if (unlikely(cond)) {                           \
  -                     atomic_inc(&buffer->record_disabled);   \
  -                     WARN_ON(1);                             \
  -             }                                               \
  -     } while (0)
  -
  -#define RB_WARN_ON_RET(buffer, cond)                         \
  -     do {                                                    \
  -             if (unlikely(cond)) {                           \
  -                     atomic_inc(&buffer->record_disabled);   \
  -                     WARN_ON(1);                             \
  -                     return -1;                              \
  -             }                                               \
  -     } while (0)
  -
  -#define RB_WARN_ON_ONCE(buffer, cond)                                \
  -     do {                                                    \
  -             static int once;                                \
  -             if (unlikely(cond) && !once) {                  \
  -                     once++;                                 \
  +     ({                                                      \
  +             int _____ret = unlikely(cond);                  \
  +             if (_____ret) {                                 \
                        atomic_inc(&buffer->record_disabled);   \
                        WARN_ON(1);                             \
                }                                               \
  -     } while (0)
  +             _____ret;                                       \
  +     })
   
   /**
    * check_pages - integrity check of buffer pages
@@@@ -247,18 -247,18 -260,14 +247,18 @@@@ static int rb_check_pages(struct ring_b
        struct list_head *head = &cpu_buffer->pages;
        struct buffer_page *page, *tmp;
   
  -     RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
  -     RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);
  +     if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
  +             return -1;
  +     if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
  +             return -1;
   
        list_for_each_entry_safe(page, tmp, head, list) {
  -             RB_WARN_ON_RET(cpu_buffer,
  -                            page->list.next->prev != &page->list);
  -             RB_WARN_ON_RET(cpu_buffer,
  -                            page->list.prev->next != &page->list);
  +             if (RB_WARN_ON(cpu_buffer,
  +                            page->list.next->prev != &page->list))
  +                     return -1;
  +             if (RB_WARN_ON(cpu_buffer,
  +                            page->list.prev->next != &page->list))
  +                     return -1;
        }
   
        return 0;
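
The macro rework above folds RB_WARN_ON(), RB_WARN_ON_RET() and RB_WARN_ON_ONCE() into a single GCC statement expression whose value is the tested condition, so each call site decides for itself whether to bail out, as rb_check_pages() now does. A minimal, compilable sketch of the same pattern outside the kernel (WARN_ON_RET(), struct node and check_list() are illustrative only, not kernel API):

	#include <stdio.h>

	/* Same shape as the reworked RB_WARN_ON(): warn, then hand the
	 * condition back so the caller can return early. */
	#define WARN_ON_RET(cond)					\
		({							\
			int _____ret = !!(cond);			\
			if (_____ret)					\
				fprintf(stderr, "warning: %s\n", #cond);\
			_____ret;					\
		})

	struct node { struct node *next, *prev; };

	static int check_list(struct node *head)
	{
		/* One macro now serves both the plain-warn and the
		 * warn-and-return call sites. */
		if (WARN_ON_RET(head->next->prev != head))
			return -1;
		if (WARN_ON_RET(head->prev->next != head))
			return -1;
		return 0;
	}

	int main(void)
	{
		struct node head = { &head, &head };
		return check_list(&head);
	}
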
@@@@ -315,8 -315,8 -324,7 +315,8 @@@@ rb_allocate_cpu_buffer(struct ring_buff
   
        cpu_buffer->cpu = cpu;
        cpu_buffer->buffer = buffer;
  -     spin_lock_init(&cpu_buffer->lock);
  +     spin_lock_init(&cpu_buffer->reader_lock);
  +     cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
        INIT_LIST_HEAD(&cpu_buffer->pages);
   
        page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
@@@@ -465,15 -465,15 -473,13 +465,15 @@@@ rb_remove_pages(struct ring_buffer_per_
        synchronize_sched();
   
        for (i = 0; i < nr_pages; i++) {
  -             BUG_ON(list_empty(&cpu_buffer->pages));
  +             if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
  +                     return;
                p = cpu_buffer->pages.next;
                page = list_entry(p, struct buffer_page, list);
                list_del_init(&page->list);
                free_buffer_page(page);
        }
  -     BUG_ON(list_empty(&cpu_buffer->pages));
  +     if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
  +             return;
   
        rb_reset_cpu(cpu_buffer);
   
@@@@ -495,8 -495,8 -501,7 +495,8 @@@@ rb_insert_pages(struct ring_buffer_per_
        synchronize_sched();
   
        for (i = 0; i < nr_pages; i++) {
  -             BUG_ON(list_empty(pages));
  +             if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
  +                     return;
                p = pages->next;
                page = list_entry(p, struct buffer_page, list);
                list_del_init(&page->list);
@@@@ -533,6 -533,6 -538,12 +533,12 @@@@ int ring_buffer_resize(struct ring_buff
        LIST_HEAD(pages);
        int i, cpu;
   
++      /*
++       * Always succeed at resizing a non-existent buffer:
++       */
++      if (!buffer)
++              return size;
++ 
        size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
        size *= BUF_PAGE_SIZE;
        buffer_size = buffer->pages * BUF_PAGE_SIZE;
        if (size < buffer_size) {
   
                /* easy case, just free pages */
  -             BUG_ON(nr_pages >= buffer->pages);
  +             if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
  +                     mutex_unlock(&buffer->mutex);
  +                     return -1;
  +             }
   
                rm_pages = buffer->pages - nr_pages;
   
         * add these pages to the cpu_buffers. Otherwise we just free
         * them all and return -ENOMEM;
         */
  -     BUG_ON(nr_pages <= buffer->pages);
  +     if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
  +             mutex_unlock(&buffer->mutex);
  +             return -1;
  +     }
  +
        new_pages = nr_pages - buffer->pages;
   
        for_each_buffer_cpu(buffer, cpu) {
                rb_insert_pages(cpu_buffer, &pages, new_pages);
        }
   
  -     BUG_ON(!list_empty(&pages));
  +     if (RB_WARN_ON(buffer, !list_empty(&pages))) {
  +             mutex_unlock(&buffer->mutex);
  +             return -1;
  +     }
   
    out:
        buffer->pages = nr_pages;
@@@@ -691,8 -691,8 -692,7 +697,8 @@@@ static void rb_update_overflow(struct r
             head += rb_event_length(event)) {
   
                event = __rb_page_index(cpu_buffer->head_page, head);
  -             BUG_ON(rb_null_event(event));
  +             if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
  +                     return;
                /* Only count data entries */
                if (event->type != RINGBUF_TYPE_DATA)
                        continue;
@@@@ -745,9 -745,9 -745,8 +751,9 @@@@ rb_set_commit_event(struct ring_buffer_
        addr &= PAGE_MASK;
   
        while (cpu_buffer->commit_page->page != (void *)addr) {
  -             RB_WARN_ON(cpu_buffer,
  -                        cpu_buffer->commit_page == cpu_buffer->tail_page);
  +             if (RB_WARN_ON(cpu_buffer,
  +                       cpu_buffer->commit_page == cpu_buffer->tail_page))
  +                     return;
                cpu_buffer->commit_page->commit =
                        cpu_buffer->commit_page->write;
                rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
@@@@ -894,8 -894,8 -893,7 +900,8 @@@@ __rb_reserve_next(struct ring_buffer_pe
        if (write > BUF_PAGE_SIZE) {
                struct buffer_page *next_page = tail_page;
   
  -             spin_lock_irqsave(&cpu_buffer->lock, flags);
  +             local_irq_save(flags);
  +             __raw_spin_lock(&cpu_buffer->lock);
   
                rb_inc_page(cpu_buffer, &next_page);
   
                reader_page = cpu_buffer->reader_page;
   
                /* we grabbed the lock before incrementing */
  -             RB_WARN_ON(cpu_buffer, next_page == reader_page);
  +             if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
  +                     goto out_unlock;
   
                /*
                 * If for some reason, we had an interrupt storm that made
                        rb_set_commit_to_write(cpu_buffer);
                }
   
  -             spin_unlock_irqrestore(&cpu_buffer->lock, flags);
  +             __raw_spin_unlock(&cpu_buffer->lock);
  +             local_irq_restore(flags);
   
                /* fail and let the caller try again */
                return ERR_PTR(-EAGAIN);
   
        /* We reserved something on the buffer */
   
  -     BUG_ON(write > BUF_PAGE_SIZE);
  +     if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
  +             return NULL;
   
        event = __rb_page_index(tail_page, tail);
        rb_update_event(event, type, length);
        return event;
   
    out_unlock:
  -     spin_unlock_irqrestore(&cpu_buffer->lock, flags);
  +     __raw_spin_unlock(&cpu_buffer->lock);
  +     local_irq_restore(flags);
        return NULL;
   }
   
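The writer-path hunk above replaces spin_lock_irqsave() on cpu_buffer->lock with an explicit local_irq_save() plus __raw_spin_lock() pair, while readers take the new reader_lock added to the per-cpu structure. A kernel-context sketch of the resulting nesting, assuming only the fields shown in this diff (reader_side() and writer_side() are illustrative names):

	/* reader_lock (an ordinary spinlock) serializes readers and is
	 * visible to lockdep; lock (a raw spinlock) only guards the
	 * reader-page swap and tail/commit advance, and is taken with
	 * interrupts disabled by hand so the tracer never recurses
	 * into the lock code it is tracing. */
	static void reader_side(struct ring_buffer_per_cpu *cpu_buffer)
	{
		unsigned long flags;

		spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
		__raw_spin_lock(&cpu_buffer->lock);
		/* ... swap in the reader page ... */
		__raw_spin_unlock(&cpu_buffer->lock);
		spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
	}

	static void writer_side(struct ring_buffer_per_cpu *cpu_buffer)
	{
		unsigned long flags;

		local_irq_save(flags);
		__raw_spin_lock(&cpu_buffer->lock);
		/* ... advance tail_page / commit_page ... */
		__raw_spin_unlock(&cpu_buffer->lock);
		local_irq_restore(flags);
	}
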
@@@@ -1081,8 -1081,8 -1075,10 +1087,8 @@@@ rb_reserve_next_event(struct ring_buffe
         * storm or we have something buggy.
         * Bail!
         */
  -     if (unlikely(++nr_loops > 1000)) {
  -             RB_WARN_ON(cpu_buffer, 1);
  +     if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
                return NULL;
  -     }
   
        ts = ring_buffer_time_stamp(cpu_buffer->cpu);
   
@@@@ -1185,7 -1185,7 -1181,8 +1191,7 @@@@ ring_buffer_lock_reserve(struct ring_bu
                return NULL;
   
        /* If we are tracing schedule, we don't want to recurse */
  -     resched = need_resched();
  -     preempt_disable_notrace();
  +     resched = ftrace_preempt_disable();
   
        cpu = raw_smp_processor_id();
   
        return event;
   
    out:
  -     if (resched)
  -             preempt_enable_no_resched_notrace();
  -     else
  -             preempt_enable_notrace();
  +     ftrace_preempt_enable(resched);
        return NULL;
   }
   
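ftrace_preempt_disable()/ftrace_preempt_enable() replace the open-coded need_resched()/preempt_disable_notrace() sequence removed above. A sketch of what those helpers amount to, inferred from the removed code; the authoritative definitions live in kernel/trace/trace.h, which this diff now includes:

	static inline int ftrace_preempt_disable(void)
	{
		int resched = need_resched();

		preempt_disable_notrace();
		return resched;
	}

	static inline void ftrace_preempt_enable(int resched)
	{
		if (resched)
			preempt_enable_no_resched_notrace();
		else
			preempt_enable_notrace();
	}
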
@@@@ -1258,9 -1258,9 -1258,12 +1264,9 @@@@ int ring_buffer_unlock_commit(struct ri
        /*
         * Only the last preempt count needs to restore preemption.
         */
  -     if (preempt_count() == 1) {
  -             if (per_cpu(rb_need_resched, cpu))
  -                     preempt_enable_no_resched_notrace();
  -             else
  -                     preempt_enable_notrace();
  -     } else
  +     if (preempt_count() == 1)
  +             ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
  +     else
                preempt_enable_no_resched_notrace();
   
        return 0;
@@@@ -1296,7 -1296,7 -1299,8 +1302,7 @@@@ int ring_buffer_write(struct ring_buffe
        if (atomic_read(&buffer->record_disabled))
                return -EBUSY;
   
  -     resched = need_resched();
  -     preempt_disable_notrace();
  +     resched = ftrace_preempt_disable();
   
        cpu = raw_smp_processor_id();
   
   
        ret = 0;
    out:
  -     if (resched)
  -             preempt_enable_no_resched_notrace();
  -     else
  -             preempt_enable_notrace();
  +     ftrace_preempt_enable(resched);
   
        return ret;
   }
@@@@ -1481,7 -1481,7 -1488,14 +1487,7 @@@@ unsigned long ring_buffer_overruns(stru
        return overruns;
   }
   
  -/**
  - * ring_buffer_iter_reset - reset an iterator
  - * @iter: The iterator to reset
  - *
  - * Resets the iterator, so that it will start from the beginning
  - * again.
  - */
  -void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
  +static void rb_iter_reset(struct ring_buffer_iter *iter)
   {
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
   
                iter->read_stamp = iter->head_page->time_stamp;
   }
   
  +/**
  + * ring_buffer_iter_reset - reset an iterator
  + * @iter: The iterator to reset
  + *
  + * Resets the iterator, so that it will start from the beginning
  + * again.
  + */
  +void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
  +{
  +     struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
  +     unsigned long flags;
  +
  +     spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  +     rb_iter_reset(iter);
  +     spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
  +}
  +
   /**
    * ring_buffer_iter_empty - check if an iterator has no more to read
    * @iter: The iterator to check
@@@@ -1599,8 -1599,8 -1596,7 +1605,8 @@@@ rb_get_reader_page(struct ring_buffer_p
        unsigned long flags;
        int nr_loops = 0;
   
  -     spin_lock_irqsave(&cpu_buffer->lock, flags);
  +     local_irq_save(flags);
  +     __raw_spin_lock(&cpu_buffer->lock);
   
    again:
        /*
         * a case where we will loop three times. There should be no
         * reason to loop four times (that I know of).
         */
  -     if (unlikely(++nr_loops > 3)) {
  -             RB_WARN_ON(cpu_buffer, 1);
  +     if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
                reader = NULL;
                goto out;
        }
                goto out;
   
        /* Never should we have an index greater than the size */
  -     RB_WARN_ON(cpu_buffer,
  -                cpu_buffer->reader_page->read > rb_page_size(reader));
  +     if (RB_WARN_ON(cpu_buffer,
  +                    cpu_buffer->reader_page->read > rb_page_size(reader)))
  +             goto out;
   
        /* check if we caught up to the tail */
        reader = NULL;
        goto again;
   
    out:
  -     spin_unlock_irqrestore(&cpu_buffer->lock, flags);
  +     __raw_spin_unlock(&cpu_buffer->lock);
  +     local_irq_restore(flags);
   
        return reader;
   }
@@@@ -1677,8 -1677,8 -1672,7 +1683,8 @@@@ static void rb_advance_reader(struct ri
        reader = rb_get_reader_page(cpu_buffer);
   
        /* This function should not be called when buffer is empty */
  -     BUG_ON(!reader);
  +     if (RB_WARN_ON(cpu_buffer, !reader))
  +             return;
   
        event = rb_reader_event(cpu_buffer);
   
@@@@ -1705,9 -1705,9 -1699,7 +1711,9 @@@@ static void rb_advance_iter(struct ring
         * Check if we are at the end of the buffer.
         */
        if (iter->head >= rb_page_size(iter->head_page)) {
  -             BUG_ON(iter->head_page == cpu_buffer->commit_page);
  +             if (RB_WARN_ON(buffer,
  +                            iter->head_page == cpu_buffer->commit_page))
  +                     return;
                rb_inc_iter(iter);
                return;
        }
         * This should not be called to advance the header if we are
         * at the tail of the buffer.
         */
  -     BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
  -            (iter->head + length > rb_commit_index(cpu_buffer)));
  +     if (RB_WARN_ON(cpu_buffer,
  +                    (iter->head_page == cpu_buffer->commit_page) &&
  +                    (iter->head + length > rb_commit_index(cpu_buffer))))
  +             return;
   
        rb_update_iter_read_stamp(iter, event);
   
                rb_advance_iter(iter);
   }
   
  -/**
  - * ring_buffer_peek - peek at the next event to be read
  - * @buffer: The ring buffer to read
  - * @cpu: The cpu to peak at
  - * @ts: The timestamp counter of this event.
  - *
  - * This will return the event that will be read next, but does
  - * not consume the data.
  - */
  -struct ring_buffer_event *
  -ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
  +static struct ring_buffer_event *
  +rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
   {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
         * can have.  Nesting 10 deep of interrupts is clearly
         * an anomaly.
         */
  -     if (unlikely(++nr_loops > 10)) {
  -             RB_WARN_ON(cpu_buffer, 1);
  +     if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
                return NULL;
  -     }
   
        reader = rb_get_reader_page(cpu_buffer);
        if (!reader)
        return NULL;
   }
   
  -/**
  - * ring_buffer_iter_peek - peek at the next event to be read
  - * @iter: The ring buffer iterator
  - * @ts: The timestamp counter of this event.
  - *
  - * This will return the event that will be read next, but does
  - * not increment the iterator.
  - */
  -struct ring_buffer_event *
  -ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
  +static struct ring_buffer_event *
  +rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
   {
        struct ring_buffer *buffer;
        struct ring_buffer_per_cpu *cpu_buffer;
         * can have. Nesting 10 deep of interrupts is clearly
         * an anomaly.
         */
  -     if (unlikely(++nr_loops > 10)) {
  -             RB_WARN_ON(cpu_buffer, 1);
  +     if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
                return NULL;
  -     }
   
        if (rb_per_cpu_empty(cpu_buffer))
                return NULL;
        return NULL;
   }
   
  +/**
  + * ring_buffer_peek - peek at the next event to be read
  + * @buffer: The ring buffer to read
  + * @cpu: The cpu to peak at
  + * @ts: The timestamp counter of this event.
  + *
  + * This will return the event that will be read next, but does
  + * not consume the data.
  + */
  +struct ring_buffer_event *
  +ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
  +{
  +     struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
  +     struct ring_buffer_event *event;
  +     unsigned long flags;
  +
  +     spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  +     event = rb_buffer_peek(buffer, cpu, ts);
  +     spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
  +
  +     return event;
  +}
  +
  +/**
  + * ring_buffer_iter_peek - peek at the next event to be read
  + * @iter: The ring buffer iterator
  + * @ts: The timestamp counter of this event.
  + *
  + * This will return the event that will be read next, but does
  + * not increment the iterator.
  + */
  +struct ring_buffer_event *
  +ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
  +{
  +     struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
  +     struct ring_buffer_event *event;
  +     unsigned long flags;
  +
  +     spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  +     event = rb_iter_peek(iter, ts);
  +     spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
  +
  +     return event;
  +}
  +
   /**
    * ring_buffer_consume - return an event and consume it
    * @buffer: The ring buffer to get the next event from
   struct ring_buffer_event *
   ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
   {
  -     struct ring_buffer_per_cpu *cpu_buffer;
  +     struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        struct ring_buffer_event *event;
  +     unsigned long flags;
   
        if (!cpu_isset(cpu, buffer->cpumask))
                return NULL;
   
  -     event = ring_buffer_peek(buffer, cpu, ts);
  +     spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  +
  +     event = rb_buffer_peek(buffer, cpu, ts);
        if (!event)
  -             return NULL;
  +             goto out;
   
  -     cpu_buffer = buffer->buffers[cpu];
        rb_advance_reader(cpu_buffer);
   
  + out:
  +     spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
  +
        return event;
   }
   
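With the reader_lock now taken inside ring_buffer_peek() and ring_buffer_consume(), callers need no extra serialization against other readers of the same per-cpu buffer. A hypothetical consumer loop (drain_cpu() and process_event() are illustrative names, not part of the API):

	static void drain_cpu(struct ring_buffer *buffer, int cpu)
	{
		struct ring_buffer_event *event;
		u64 ts;

		/* Each call locks cpu_buffer->reader_lock internally. */
		while ((event = ring_buffer_consume(buffer, cpu, &ts)))
			process_event(event, ts);
	}
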
@@@@ -1966,11 -1966,11 -1927,9 +1972,11 @@@@ ring_buffer_read_start(struct ring_buff
        atomic_inc(&cpu_buffer->record_disabled);
        synchronize_sched();
   
  -     spin_lock_irqsave(&cpu_buffer->lock, flags);
  -     ring_buffer_iter_reset(iter);
  -     spin_unlock_irqrestore(&cpu_buffer->lock, flags);
  +     spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  +     __raw_spin_lock(&cpu_buffer->lock);
  +     rb_iter_reset(iter);
  +     __raw_spin_unlock(&cpu_buffer->lock);
  +     spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
   
        return iter;
   }
@@@@ -2002,17 -2002,17 -1961,12 +2008,17 @@@@ struct ring_buffer_event 
   ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
   {
        struct ring_buffer_event *event;
  +     struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
  +     unsigned long flags;
   
  -     event = ring_buffer_iter_peek(iter, ts);
  +     spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  +     event = rb_iter_peek(iter, ts);
        if (!event)
  -             return NULL;
  +             goto out;
   
        rb_advance_iter(iter);
  + out:
  +     spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
   
        return event;
   }
@@@@ -2061,15 -2061,15 -2015,11 +2067,15 @@@@ void ring_buffer_reset_cpu(struct ring_
        if (!cpu_isset(cpu, buffer->cpumask))
                return;
   
  -     spin_lock_irqsave(&cpu_buffer->lock, flags);
  +     spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  +
  +     __raw_spin_lock(&cpu_buffer->lock);
   
        rb_reset_cpu(cpu_buffer);
   
  -     spin_unlock_irqrestore(&cpu_buffer->lock, flags);
  +     __raw_spin_unlock(&cpu_buffer->lock);
  +
  +     spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
   }
   
   /**