* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* For licensing details see kernel-base/COPYING
*/
int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
/*
- * Mutex for (sysadmin-configurable) counter reservations:
+ * Lock for (sysadmin-configurable) counter reservations:
*/
-static DEFINE_MUTEX(perf_resource_mutex);
+static DEFINE_SPINLOCK(perf_resource_lock);
/*
* Architecture provided APIs - weak aliases:
{
struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
struct perf_counter_context *ctx = &curr->perf_counter_ctx;
- const int rotate_percpu = 0;
- if (rotate_percpu)
- perf_counter_cpu_sched_out(cpuctx);
+ perf_counter_cpu_sched_out(cpuctx);
perf_counter_task_sched_out(curr, cpu);
- if (rotate_percpu)
- rotate_ctx(&cpuctx->ctx);
+ rotate_ctx(&cpuctx->ctx);
rotate_ctx(ctx);
- if (rotate_percpu)
- perf_counter_cpu_sched_in(cpuctx, cpu);
+ perf_counter_cpu_sched_in(cpuctx, cpu);
perf_counter_task_sched_in(curr, cpu);
}
{
struct perf_counter *counter = file->private_data;
struct perf_mmap_data *data;
- unsigned int events;
+ unsigned int events = POLLHUP;
rcu_read_lock();
data = rcu_dereference(counter->data);
if (data)
- events = atomic_xchg(&data->wakeup, 0);
- else
- events = POLL_HUP;
+ events = atomic_xchg(&data->poll, 0);
rcu_read_unlock();
poll_wait(file, &counter->waitq, wait);
return events;
}
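From userspace, the effect of the poll handling above is that poll() on a counter fd reports readable when new data has been logged and hangup once the counter/buffer has gone away. A minimal sketch, assuming counter_fd was obtained from the perf counter syscall (buffer setup elided):

#include <poll.h>

/* Userspace sketch only: counter_fd is assumed to come from the perf
 * counter syscall; the mmap()ed ring buffer setup is elided. */
static void wait_for_counter(int counter_fd)
{
	struct pollfd pfd = { .fd = counter_fd, .events = POLLIN };

	while (poll(&pfd, 1, -1) > 0) {
		if (pfd.revents & (POLLIN | POLLHUP))
			break;		/* data available, or counter went away */
	}
}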
+static void perf_counter_reset(struct perf_counter *counter)
+{
+ atomic_set(&counter->count, 0);
+}
+
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct perf_counter *counter = file->private_data;
case PERF_COUNTER_IOC_REFRESH:
perf_counter_refresh(counter, arg);
break;
+ case PERF_COUNTER_IOC_RESET:
+ perf_counter_reset(counter);
+ break;
default:
err = -ENOTTY;
}
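The new reset ioctl simply zeroes the counter value; from userspace it would be driven roughly like this (a sketch, assuming a counter fd from the perf counter syscall with the default read format and the PERF_COUNTER_IOC_* definitions from linux/perf_counter.h):

#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/perf_counter.h>

/* Userspace sketch only: zero the count, run the measured section,
 * then read back the delta as a single u64. */
static uint64_t measure(int counter_fd, void (*work)(void))
{
	uint64_t count = 0;

	ioctl(counter_fd, PERF_COUNTER_IOC_RESET, 0);	/* the new ioctl above */
	work();
	read(counter_fd, &count, sizeof(count));
	return count;
}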
void perf_counter_wakeup(struct perf_counter *counter)
{
- struct perf_mmap_data *data;
-
- rcu_read_lock();
- data = rcu_dereference(counter->data);
- if (data) {
- atomic_set(&data->wakeup, POLL_IN);
- /*
- * Ensure all data writes are issued before updating the
- * user-space data head information. The matching rmb()
- * will be in userspace after reading this value.
- */
- smp_wmb();
- data->user_page->data_head = atomic_read(&data->head);
- }
- rcu_read_unlock();
-
wake_up_all(&counter->waitq);
if (counter->pending_kill) {
struct perf_mmap_data *data;
unsigned int offset;
unsigned int head;
- int wakeup;
int nmi;
int overflow;
+ int locked;
+ unsigned long flags;
};
-static inline void __perf_output_wakeup(struct perf_output_handle *handle)
+static void perf_output_wakeup(struct perf_output_handle *handle)
{
+ atomic_set(&handle->data->poll, POLLIN);
+
if (handle->nmi) {
handle->counter->pending_wakeup = 1;
perf_pending_queue(&handle->counter->pending,
perf_counter_wakeup(handle->counter);
}
+/*
+ * Curious locking construct.
+ *
+ * We need to ensure a later event doesn't publish a head when a previous
+ * event isn't done writing. However, since we need to deal with NMIs, we
+ * cannot fully serialize things.
+ *
+ * What we do is serialize between CPUs so we only have to deal with NMI
+ * nesting on a single CPU.
+ *
+ * We only publish the head (and generate a wakeup) when the outermost
+ * event completes.
+ */
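+/*
+ * A sketch of the intended nesting (illustration only, not part of the patch):
+ *
+ *   CPU0: perf_output_begin()  -> takes data->lock, handle->locked = 1
+ *           ... writing record A ...
+ *   NMI:  perf_output_begin()  -> lock already ours, handle->locked = 0
+ *           ... writes record B; its perf_output_end() only updates
+ *           data->done_head, it does not publish ...
+ *   CPU0: perf_output_end()    -> publishes data_head covering A and B,
+ *           drops data->lock and issues the wakeup (if pending)
+ */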
+static void perf_output_lock(struct perf_output_handle *handle)
+{
+ struct perf_mmap_data *data = handle->data;
+ int cpu;
+
+ handle->locked = 0;
+
+ local_irq_save(handle->flags);
+ cpu = smp_processor_id();
+
+ if (in_nmi() && atomic_read(&data->lock) == cpu + 1)
+ return;
+
+ /*
+ * Use cpu + 1 as the lock token so that CPU 0 can be told apart
+ * from the unlocked state (0).
+ */
+ while (atomic_cmpxchg(&data->lock, 0, cpu + 1) != 0)
+ cpu_relax();
+
+ handle->locked = 1;
+}
+
+static void perf_output_unlock(struct perf_output_handle *handle)
+{
+ struct perf_mmap_data *data = handle->data;
+ int head, cpu;
+
+ data->done_head = data->head;
+
+ if (!handle->locked)
+ goto out;
+
+again:
+ /*
+ * The xchg implies a full barrier that ensures all writes are done
+ * before we publish the new head, matched by an rmb() in userspace when
+ * reading this position.
+ */
+ while ((head = atomic_xchg(&data->done_head, 0)))
+ data->user_page->data_head = head;
+
+ /*
+ * An NMI can happen here, which means we can miss a done_head update.
+ */
+
+ cpu = atomic_xchg(&data->lock, 0);
+ WARN_ON_ONCE(cpu != smp_processor_id() + 1);
+
+ /*
+ * Therefore we have to check that we did not, in fact, miss one.
+ */
+ if (unlikely(atomic_read(&data->done_head))) {
+ /*
+ * Since we had it locked, we can lock it again.
+ */
+ while (atomic_cmpxchg(&data->lock, 0, cpu) != 0)
+ cpu_relax();
+
+ goto again;
+ }
+
+ if (atomic_xchg(&data->wakeup, 0))
+ perf_output_wakeup(handle);
+out:
+ local_irq_restore(handle->flags);
+}
+
static int perf_output_begin(struct perf_output_handle *handle,
struct perf_counter *counter, unsigned int size,
int nmi, int overflow)
if (!data)
goto out;
+ handle->data = data;
handle->counter = counter;
handle->nmi = nmi;
handle->overflow = overflow;
if (!data->nr_pages)
goto fail;
+ perf_output_lock(handle);
+
do {
offset = head = atomic_read(&data->head);
head += size;
} while (atomic_cmpxchg(&data->head, offset, head) != offset);
- handle->data = data;
handle->offset = offset;
handle->head = head;
- handle->wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);
+
+ if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
+ atomic_set(&data->wakeup, 1);
return 0;
fail:
- __perf_output_wakeup(handle);
+ perf_output_wakeup(handle);
out:
rcu_read_unlock();
static void perf_output_end(struct perf_output_handle *handle)
{
- int wakeup_events = handle->counter->hw_event.wakeup_events;
+ struct perf_counter *counter = handle->counter;
+ struct perf_mmap_data *data = handle->data;
+
+ int wakeup_events = counter->hw_event.wakeup_events;
if (handle->overflow && wakeup_events) {
- int events = atomic_inc_return(&handle->data->events);
+ int events = atomic_inc_return(&data->events);
if (events >= wakeup_events) {
- atomic_sub(wakeup_events, &handle->data->events);
- __perf_output_wakeup(handle);
+ atomic_sub(wakeup_events, &data->events);
+ atomic_set(&data->wakeup, 1);
}
- } else if (handle->wakeup)
- __perf_output_wakeup(handle);
+ }
+
+ perf_output_unlock(handle);
rcu_read_unlock();
}
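For reference, the intended calling pattern, with the output lock now held from begin to end (a sketch; perf_output_put() is the existing copy helper in this file, and the record type and header layout are illustrative assumptions):

	struct perf_output_handle handle;
	struct perf_event_header header;

	header.type = PERF_EVENT_COMM;		/* any record type; illustrative only */
	header.misc = 0;
	header.size = sizeof(header);

	if (perf_output_begin(&handle, counter, header.size, /* nmi */ 0, /* overflow */ 0))
		return;				/* no mmap()ed buffer */

	perf_output_put(&handle, header);	/* all writes land between begin and end */
	perf_output_end(&handle);		/* publishes data_head, may wake readers */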
cpuctx = &per_cpu(perf_cpu_context, cpu);
__perf_counter_init_context(&cpuctx->ctx, NULL);
- mutex_lock(&perf_resource_mutex);
+ spin_lock(&perf_resource_lock);
cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
- mutex_unlock(&perf_resource_mutex);
+ spin_unlock(&perf_resource_lock);
hw_perf_counter_setup(cpu);
}
.notifier_call = perf_cpu_notify,
};
-static int __init perf_counter_init(void)
+void __init perf_counter_init(void)
{
perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&perf_cpu_nb);
-
- return 0;
}
-early_initcall(perf_counter_init);
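With the early_initcall() gone, the now-void perf_counter_init() must be called explicitly during early boot; the companion change (assumed here, not shown in this hunk) adds a call from core init code such as sched_init():

	/* Assumed call site, e.g. late in sched_init(): */
	perf_counter_init();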
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
if (val > perf_max_counters)
return -EINVAL;
- mutex_lock(&perf_resource_mutex);
+ spin_lock(&perf_resource_lock);
perf_reserved_percpu = val;
for_each_online_cpu(cpu) {
cpuctx = &per_cpu(perf_cpu_context, cpu);
cpuctx->max_pertask = mpt;
spin_unlock_irq(&cpuctx->ctx.lock);
}
- mutex_unlock(&perf_resource_mutex);
+ spin_unlock(&perf_resource_lock);
return count;
}
if (val > 1)
return -EINVAL;
- mutex_lock(&perf_resource_mutex);
+ spin_lock(&perf_resource_lock);
perf_overcommit = val;
- mutex_unlock(&perf_resource_mutex);
+ spin_unlock(&perf_resource_lock);
return count;
}