perf_counter: change event definition
authorPeter Zijlstra <a.p.zijlstra@chello.nl>
Mon, 6 Apr 2009 09:45:09 +0000 (11:45 +0200)
committerIngo Molnar <mingo@elte.hu>
Tue, 7 Apr 2009 08:48:59 +0000 (10:48 +0200)
Currently the definition of an event is slightly ambiguous. We have
wakeup events, for poll() and SIGIO, which are either generated
when a record crosses a page boundary (hw_events.wakeup_events == 0),
or every wakeup_events new records.

Now a record can be either a counter overflow record, or a number of
different things, like the mmap PROT_EXEC region notifications.

Then there is the PERF_COUNTER_IOC_REFRESH event limit, which only
considers counter overflows.

This patch changes the wakeup_events and SIGIO notification to only
consider overflow events. Furthermore it changes the SIGIO notification
to report SIGHUP when the event limit is reached and the counter will
be disabled.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094518.266679874@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/perf_counter.h
kernel/perf_counter.c

index 81220188d058349f698206d2fdf233f8dbf68a50..0f5a4005048f5eda7c00ac79ab28acc41f1ecb48 100644 (file)
@@ -439,6 +439,7 @@ struct perf_counter {
 
        /* delayed work for NMIs and such */
        int                             pending_wakeup;
+       int                             pending_kill;
        int                             pending_disable;
        struct perf_pending_entry       pending;
 
index c05e10354bc95f832fd05a9280949d1b45398ddf..8c8eaf0625f9a433f6e7be3fa2dabc6f1fee27e2 100644 (file)
@@ -1596,7 +1596,11 @@ void perf_counter_wakeup(struct perf_counter *counter)
        rcu_read_unlock();
 
        wake_up_all(&counter->waitq);
-       kill_fasync(&counter->fasync, SIGIO, POLL_IN);
+
+       if (counter->pending_kill) {
+               kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
+               counter->pending_kill = 0;
+       }
 }
 
 /*
@@ -1727,6 +1731,7 @@ struct perf_output_handle {
        unsigned int            head;
        int                     wakeup;
        int                     nmi;
+       int                     overflow;
 };
 
 static inline void __perf_output_wakeup(struct perf_output_handle *handle)
@@ -1741,7 +1746,7 @@ static inline void __perf_output_wakeup(struct perf_output_handle *handle)
 
 static int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_counter *counter, unsigned int size,
-                            int nmi)
+                            int nmi, int overflow)
 {
        struct perf_mmap_data *data;
        unsigned int offset, head;
@@ -1751,8 +1756,9 @@ static int perf_output_begin(struct perf_output_handle *handle,
        if (!data)
                goto out;
 
-       handle->counter = counter;
-       handle->nmi     = nmi;
+       handle->counter  = counter;
+       handle->nmi      = nmi;
+       handle->overflow = overflow;
 
        if (!data->nr_pages)
                goto fail;
@@ -1816,7 +1822,7 @@ static void perf_output_end(struct perf_output_handle *handle)
 {
        int wakeup_events = handle->counter->hw_event.wakeup_events;
 
-       if (wakeup_events) {
+       if (handle->overflow && wakeup_events) {
                int events = atomic_inc_return(&handle->data->events);
                if (events >= wakeup_events) {
                        atomic_sub(wakeup_events, &handle->data->events);
@@ -1891,7 +1897,7 @@ static void perf_counter_output(struct perf_counter *counter,
                header.size += sizeof(u64);
        }
 
-       ret = perf_output_begin(&handle, counter, header.size, nmi);
+       ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
        if (ret)
                return;
 
@@ -1955,7 +1961,7 @@ static void perf_counter_mmap_output(struct perf_counter *counter,
 {
        struct perf_output_handle handle;
        int size = mmap_event->event.header.size;
-       int ret = perf_output_begin(&handle, counter, size, 0);
+       int ret = perf_output_begin(&handle, counter, size, 0, 0);
 
        if (ret)
                return;
@@ -2084,8 +2090,10 @@ int perf_counter_overflow(struct perf_counter *counter,
        int events = atomic_read(&counter->event_limit);
        int ret = 0;
 
+       counter->pending_kill = POLL_IN;
        if (events && atomic_dec_and_test(&counter->event_limit)) {
                ret = 1;
+               counter->pending_kill = POLL_HUP;
                if (nmi) {
                        counter->pending_disable = 1;
                        perf_pending_queue(&counter->pending,
This page took 0.037364 seconds and 5 git commands to generate.