rcu_read_unlock();
wake_up_all(&counter->waitq);
- kill_fasync(&counter->fasync, SIGIO, POLL_IN);
+
+ if (counter->pending_kill) {
+ kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
+ counter->pending_kill = 0;
+ }
}
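
The wakeup used to raise SIGIO with POLL_IN unconditionally. Overflow handling can run in NMI context, where signalling has to be deferred to the wakeup, so the overflow path now records the band to deliver in counter->pending_kill and the wakeup consumes it exactly once; wakeups not tied to an overflow no longer signal userspace at all. As the last hunk of this patch shows, the recorded band is POLL_IN for an ordinary overflow and POLL_HUP once the counter exhausts its event_limit. Userspace can tell the two apart through si_band, provided it arms a queued signal with F_SETSIG (plain SIGIO delivery carries no band). A sketch of that userspace side, assuming fd is a counter file descriptor obtained elsewhere:

	#define _GNU_SOURCE		/* for F_SETSIG */
	#include <fcntl.h>
	#include <poll.h>
	#include <signal.h>
	#include <unistd.h>

	static void overflow_handler(int sig, siginfo_t *info, void *uc)
	{
		if (info->si_band & POLLHUP) {
			/* event_limit reached: the kernel disabled the counter */
		} else if (info->si_band & POLLIN) {
			/* ordinary overflow: new data in the buffer */
		}
	}

	static void arm_async_io(int fd)
	{
		struct sigaction sa = {
			.sa_sigaction	= overflow_handler,
			.sa_flags	= SA_SIGINFO,
		};

		sigaction(SIGRTMIN, &sa, NULL);
		fcntl(fd, F_SETOWN, getpid());
		fcntl(fd, F_SETSIG, SIGRTMIN);	/* fill si_band/si_fd in siginfo */
		fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
	}
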
unsigned int head;
int wakeup;
int nmi;
+ int overflow;
};
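
The output handle grows an overflow flag so the output-completion side (the wakeup_events hunk below) can tell whether the record just written came from a real counter overflow. With the new member, the struct reads roughly as follows; the fields above head are an assumption, inferred from how perf_output_begin() fills the handle elsewhere in this patch:

	struct perf_output_handle {
		struct perf_counter	*counter;	/* assumed layout */
		struct perf_mmap_data	*data;		/* assumed layout */
		unsigned int		offset;		/* assumed layout */
		unsigned int		head;
		int			wakeup;
		int			nmi;
		int			overflow;	/* 1 if this is an overflow record */
	};
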
static inline void __perf_output_wakeup(struct perf_output_handle *handle)
static int perf_output_begin(struct perf_output_handle *handle,
struct perf_counter *counter, unsigned int size,
- int nmi)
+ int nmi, int overflow)
{
struct perf_mmap_data *data;
unsigned int offset, head;
if (!data)
goto out;
- handle->counter = counter;
- handle->nmi = nmi;
+ handle->counter = counter;
+ handle->nmi = nmi;
+ handle->overflow = overflow;
if (!data->nr_pages)
goto fail;
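
perf_output_begin() thus takes a fifth argument, and every caller has to say whether the record it is about to write is the product of a counter overflow:

	static int perf_output_begin(struct perf_output_handle *handle,
				     struct perf_counter *counter,
				     unsigned int size, int nmi, int overflow);

The two call-site hunks further down show the convention: 1 for sampled overflow data, 0 for everything else.
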
{
int wakeup_events = handle->counter->hw_event.wakeup_events;
- if (wakeup_events) {
+ if (handle->overflow && wakeup_events) {
int events = atomic_inc_return(&handle->data->events);
if (events >= wakeup_events) {
atomic_sub(wakeup_events, &handle->data->events);
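
Gating the accounting on handle->overflow means data->events only advances for overflow records, so hw_event.wakeup_events now counts sampled events rather than every record that lands in the buffer; mmap records would otherwise eat into the wakeup budget. Note the atomic_sub() instead of a plain reset to zero: increments that race in from other CPUs while the threshold is crossed are preserved for the next wakeup window. Condensed, the logic after this patch:

	if (handle->overflow && wakeup_events) {
		int events = atomic_inc_return(&handle->data->events);

		if (events >= wakeup_events) {
			/* keep racing increments for the next window */
			atomic_sub(wakeup_events, &handle->data->events);
			/* ... trigger the wakeup (body elided in the hunk) ... */
		}
	}
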
header.size += sizeof(u64);
}
- ret = perf_output_begin(&handle, counter, header.size, nmi);
+ ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
if (ret)
return;
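
This is the sampled-data path: the record being written is the direct product of a counter overflow, so it passes overflow=1 and forwards nmi from its caller.
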
{
struct perf_output_handle handle;
int size = mmap_event->event.header.size;
- int ret = perf_output_begin(&handle, counter, size, 0);
+ int ret = perf_output_begin(&handle, counter, size, 0, 0);
if (ret)
return;
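
The mmap tracking records, by contrast, are bookkeeping: they are not emitted from NMI context and should not bring a wakeup closer, hence 0 for both flags. Side by side, the call-site convention:

	/* sampled data from a real overflow: counts toward wakeup_events */
	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);

	/* mmap tracking record: not NMI, exempt from wakeup accounting */
	ret = perf_output_begin(&handle, counter, size, 0, 0);
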
int events = atomic_read(&counter->event_limit);
int ret = 0;
+ counter->pending_kill = POLL_IN;
if (events && atomic_dec_and_test(&counter->event_limit)) {
ret = 1;
+ counter->pending_kill = POLL_HUP;
if (nmi) {
counter->pending_disable = 1;
perf_pending_queue(&counter->pending,