perf_counter: add ioctl(PERF_COUNTER_IOC_RESET)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 76376ecb23b5ebe9a45eff99ba84d6006291373d..6e6834e0587e01238cc950015d635b632038e911 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1,9 +1,10 @@
 /*
  * Performance counter core code
  *
- *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
- *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
- *
+ *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  *
  *  For licensing details see kernel-base/COPYING
  */
@@ -42,16 +43,17 @@ static atomic_t nr_mmap_tracking __read_mostly;
 static atomic_t nr_munmap_tracking __read_mostly;
 static atomic_t nr_comm_tracking __read_mostly;
 
+int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
+
 /*
- * Mutex for (sysadmin-configurable) counter reservations:
+ * Lock for (sysadmin-configurable) counter reservations:
  */
-static DEFINE_MUTEX(perf_resource_mutex);
+static DEFINE_SPINLOCK(perf_resource_lock);
 
 /*
  * Architecture provided APIs - weak aliases:
  */
-extern __weak const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter)
+extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 {
        return NULL;
 }
@@ -122,7 +124,7 @@ counter_sched_out(struct perf_counter *counter,
 
        counter->state = PERF_COUNTER_STATE_INACTIVE;
        counter->tstamp_stopped = ctx->time;
-       counter->hw_ops->disable(counter);
+       counter->pmu->disable(counter);
        counter->oncpu = -1;
 
        if (!is_software_counter(counter))
@@ -415,7 +417,7 @@ counter_sched_in(struct perf_counter *counter,
         */
        smp_wmb();
 
-       if (counter->hw_ops->enable(counter)) {
+       if (counter->pmu->enable(counter)) {
                counter->state = PERF_COUNTER_STATE_INACTIVE;
                counter->oncpu = -1;
                return -EAGAIN;
@@ -1067,18 +1069,14 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 {
        struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
        struct perf_counter_context *ctx = &curr->perf_counter_ctx;
-       const int rotate_percpu = 0;
 
-       if (rotate_percpu)
-               perf_counter_cpu_sched_out(cpuctx);
+       perf_counter_cpu_sched_out(cpuctx);
        perf_counter_task_sched_out(curr, cpu);
 
-       if (rotate_percpu)
-               rotate_ctx(&cpuctx->ctx);
+       rotate_ctx(&cpuctx->ctx);
        rotate_ctx(ctx);
 
-       if (rotate_percpu)
-               perf_counter_cpu_sched_in(cpuctx, cpu);
+       perf_counter_cpu_sched_in(cpuctx, cpu);
        perf_counter_task_sched_in(curr, cpu);
 }
 
@@ -1094,7 +1092,7 @@ static void __read(void *info)
        local_irq_save(flags);
        if (ctx->is_active)
                update_context_time(ctx);
-       counter->hw_ops->read(counter);
+       counter->pmu->read(counter);
        update_counter_times(counter);
        local_irq_restore(flags);
 }
@@ -1132,7 +1130,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
         */
        if (cpu != -1) {
                /* Must be root to operate on a CPU counter: */
-               if (!capable(CAP_SYS_ADMIN))
+               if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
                        return ERR_PTR(-EACCES);
 
                if (cpu < 0 || cpu > num_possible_cpus())
@@ -1277,14 +1275,12 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 {
        struct perf_counter *counter = file->private_data;
        struct perf_mmap_data *data;
-       unsigned int events;
+       unsigned int events = POLL_HUP;
 
        rcu_read_lock();
        data = rcu_dereference(counter->data);
        if (data)
-               events = atomic_xchg(&data->wakeup, 0);
-       else
-               events = POLL_HUP;
+               events = atomic_xchg(&data->poll, 0);
        rcu_read_unlock();
 
        poll_wait(file, &counter->waitq, wait);
@@ -1292,6 +1288,11 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
        return events;
 }
 
+static void perf_counter_reset(struct perf_counter *counter)
+{
+       atomic_set(&counter->count, 0);
+}
+
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        struct perf_counter *counter = file->private_data;
@@ -1307,6 +1308,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case PERF_COUNTER_IOC_REFRESH:
                perf_counter_refresh(counter, arg);
                break;
+       case PERF_COUNTER_IOC_RESET:
+               perf_counter_reset(counter);
+               break;
        default:
                err = -ENOTTY;
        }
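
The new command only clears the counter's accumulated value; user space drives it through the counter file descriptor. Below is a minimal sketch of the intended usage, assuming a counter fd already opened via the perf counter open syscall and the PERF_COUNTER_IOC_RESET definition from this era's <linux/perf_counter.h>; it is an illustration, not part of the patch.

    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/perf_counter.h>    /* PERF_COUNTER_IOC_RESET (assumed header) */

    /* Reset a counter to zero, run the measured section, read the new value. */
    static int reset_and_read(int counter_fd, uint64_t *value)
    {
            if (ioctl(counter_fd, PERF_COUNTER_IOC_RESET, 0) < 0) {
                    perror("PERF_COUNTER_IOC_RESET");
                    return -1;
            }

            /* ... workload to be measured goes here ... */

            if (read(counter_fd, value, sizeof(*value)) != sizeof(*value)) {
                    perror("read");
                    return -1;
            }
            return 0;
    }
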
@@ -1566,22 +1570,6 @@ static const struct file_operations perf_fops = {
 
 void perf_counter_wakeup(struct perf_counter *counter)
 {
-       struct perf_mmap_data *data;
-
-       rcu_read_lock();
-       data = rcu_dereference(counter->data);
-       if (data) {
-               atomic_set(&data->wakeup, POLL_IN);
-               /*
-                * Ensure all data writes are issued before updating the
-                * user-space data head information. The matching rmb()
-                * will be in userspace after reading this value.
-                */
-               smp_wmb();
-               data->user_page->data_head = atomic_read(&data->head);
-       }
-       rcu_read_unlock();
-
        wake_up_all(&counter->waitq);
 
        if (counter->pending_kill) {
@@ -1716,13 +1704,16 @@ struct perf_output_handle {
        struct perf_mmap_data   *data;
        unsigned int            offset;
        unsigned int            head;
-       int                     wakeup;
        int                     nmi;
        int                     overflow;
+       int                     locked;
+       unsigned long           flags;
 };
 
-static inline void __perf_output_wakeup(struct perf_output_handle *handle)
+static void perf_output_wakeup(struct perf_output_handle *handle)
 {
+       atomic_set(&handle->data->poll, POLL_IN);
+
        if (handle->nmi) {
                handle->counter->pending_wakeup = 1;
                perf_pending_queue(&handle->counter->pending,
@@ -1731,6 +1722,83 @@ static inline void __perf_output_wakeup(struct perf_output_handle *handle)
                perf_counter_wakeup(handle->counter);
 }
 
+/*
+ * Curious locking construct.
+ *
+ * We need to ensure a later event doesn't publish a head when a former
+ * event isn't done writing. However since we need to deal with NMIs we
+ * cannot fully serialize things.
+ *
+ * What we do is serialize between CPUs so we only have to deal with NMI
+ * nesting on a single CPU.
+ *
+ * We only publish the head (and generate a wakeup) when the outer-most
+ * event completes.
+ */
+static void perf_output_lock(struct perf_output_handle *handle)
+{
+       struct perf_mmap_data *data = handle->data;
+       int cpu;
+
+       handle->locked = 0;
+
+       local_irq_save(handle->flags);
+       cpu = smp_processor_id();
+
+       if (in_nmi() && atomic_read(&data->lock) == cpu)
+               return;
+
+       while (atomic_cmpxchg(&data->lock, 0, cpu) != 0)
+               cpu_relax();
+
+       handle->locked = 1;
+}
+
+static void perf_output_unlock(struct perf_output_handle *handle)
+{
+       struct perf_mmap_data *data = handle->data;
+       int head, cpu;
+
+       data->done_head = data->head;
+
+       if (!handle->locked)
+               goto out;
+
+again:
+       /*
+        * The xchg implies a full barrier that ensures all writes are done
+        * before we publish the new head, matched by a rmb() in userspace when
+        * reading this position.
+        */
+       while ((head = atomic_xchg(&data->done_head, 0)))
+               data->user_page->data_head = head;
+
+       /*
+        * NMI can happen here, which means we can miss a done_head update.
+        */
+
+       cpu = atomic_xchg(&data->lock, 0);
+       WARN_ON_ONCE(cpu != smp_processor_id());
+
+       /*
+        * Therefore we have to validate we did not indeed do so.
+        */
+       if (unlikely(atomic_read(&data->done_head))) {
+               /*
+                * Since we had it locked, we can lock it again.
+                */
+               while (atomic_cmpxchg(&data->lock, 0, cpu) != 0)
+                       cpu_relax();
+
+               goto again;
+       }
+
+       if (atomic_xchg(&data->wakeup, 0))
+               perf_output_wakeup(handle);
+out:
+       local_irq_restore(handle->flags);
+}
+
 static int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_counter *counter, unsigned int size,
                             int nmi, int overflow)
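
The comments in perf_output_unlock() above describe the kernel half of the data_head protocol: the xchg acts as the publishing barrier, and user space must pair it with an rmb() before reading the just-published records. A minimal sketch of that reader side follows, assuming the struct perf_counter_mmap_page layout from this era's <linux/perf_counter.h> (only the data_head field is actually referenced by the code above).

    #include <stdint.h>
    #include <linux/perf_counter.h>    /* struct perf_counter_mmap_page (assumed header) */

    /*
     * Return the newly published head; records in [tail, head) may then be
     * read from the data pages that follow the mmap'ed control page.
     */
    static uint32_t fetch_data_head(volatile struct perf_counter_mmap_page *pg)
    {
            uint32_t head = pg->data_head;

            __sync_synchronize();    /* rmb(): pairs with the kernel's publishing barrier */

            return head;
    }
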
@@ -1743,6 +1811,7 @@ static int perf_output_begin(struct perf_output_handle *handle,
        if (!data)
                goto out;
 
+       handle->data     = data;
        handle->counter  = counter;
        handle->nmi      = nmi;
        handle->overflow = overflow;
@@ -1750,20 +1819,23 @@ static int perf_output_begin(struct perf_output_handle *handle,
        if (!data->nr_pages)
                goto fail;
 
+       perf_output_lock(handle);
+
        do {
                offset = head = atomic_read(&data->head);
                head += size;
        } while (atomic_cmpxchg(&data->head, offset, head) != offset);
 
-       handle->data    = data;
        handle->offset  = offset;
        handle->head    = head;
-       handle->wakeup  = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);
+
+       if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
+               atomic_set(&data->wakeup, 1);
 
        return 0;
 
 fail:
-       __perf_output_wakeup(handle);
+       perf_output_wakeup(handle);
 out:
        rcu_read_unlock();
 
@@ -1807,16 +1879,20 @@ static void perf_output_copy(struct perf_output_handle *handle,
 
 static void perf_output_end(struct perf_output_handle *handle)
 {
-       int wakeup_events = handle->counter->hw_event.wakeup_events;
+       struct perf_counter *counter = handle->counter;
+       struct perf_mmap_data *data = handle->data;
+
+       int wakeup_events = counter->hw_event.wakeup_events;
 
        if (handle->overflow && wakeup_events) {
-               int events = atomic_inc_return(&handle->data->events);
+               int events = atomic_inc_return(&data->events);
                if (events >= wakeup_events) {
-                       atomic_sub(wakeup_events, &handle->data->events);
-                       __perf_output_wakeup(handle);
+                       atomic_sub(wakeup_events, &data->events);
+                       atomic_set(&data->wakeup, 1);
                }
-       } else if (handle->wakeup)
-               __perf_output_wakeup(handle);
+       }
+
+       perf_output_unlock(handle);
        rcu_read_unlock();
 }
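
With this change perf_output_end() only arms the wakeup every hw_event.wakeup_events overflow records, so a consumer would typically block in poll() and drain the ring buffer in batches. A hedged sketch of that consumer step, assuming the counter was opened with a non-zero wakeup_events (the open syscall itself is outside this patch):

    #include <poll.h>

    /* Block until the kernel has queued wakeup_events overflow records. */
    static int wait_for_records(int counter_fd)
    {
            struct pollfd pfd = { .fd = counter_fd, .events = POLLIN };

            return poll(&pfd, 1, -1);    /* wakeup issued from perf_output_unlock() */
    }
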
 
@@ -1920,7 +1996,7 @@ static void perf_counter_output(struct perf_counter *counter,
                leader = counter->group_leader;
                list_for_each_entry(sub, &leader->sibling_list, list_entry) {
                        if (sub != counter)
-                               sub->hw_ops->read(sub);
+                               sub->pmu->read(sub);
 
                        group_entry.event = sub->hw_event.config;
                        group_entry.counter = atomic64_read(&sub->count);
@@ -2114,7 +2190,7 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
                        name = strncpy(tmp, "//enomem", sizeof(tmp));
                        goto got_name;
                }
-               name = dentry_path(file->f_dentry, buf, PATH_MAX);
+               name = d_path(&file->f_path, buf, PATH_MAX);
                if (IS_ERR(name)) {
                        name = strncpy(tmp, "//toolong", sizeof(tmp));
                        goto got_name;
@@ -2262,7 +2338,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
        struct pt_regs *regs;
 
        counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
-       counter->hw_ops->read(counter);
+       counter->pmu->read(counter);
 
        regs = get_irq_regs();
        /*
@@ -2408,7 +2484,7 @@ static void perf_swcounter_disable(struct perf_counter *counter)
        perf_swcounter_update(counter);
 }
 
-static const struct hw_perf_counter_ops perf_ops_generic = {
+static const struct pmu perf_ops_generic = {
        .enable         = perf_swcounter_enable,
        .disable        = perf_swcounter_disable,
        .read           = perf_swcounter_read,
@@ -2458,7 +2534,7 @@ static void cpu_clock_perf_counter_read(struct perf_counter *counter)
        cpu_clock_perf_counter_update(counter);
 }
 
-static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
+static const struct pmu perf_ops_cpu_clock = {
        .enable         = cpu_clock_perf_counter_enable,
        .disable        = cpu_clock_perf_counter_disable,
        .read           = cpu_clock_perf_counter_read,
@@ -2520,7 +2596,7 @@ static void task_clock_perf_counter_read(struct perf_counter *counter)
        task_clock_perf_counter_update(counter, time);
 }
 
-static const struct hw_perf_counter_ops perf_ops_task_clock = {
+static const struct pmu perf_ops_task_clock = {
        .enable         = task_clock_perf_counter_enable,
        .disable        = task_clock_perf_counter_disable,
        .read           = task_clock_perf_counter_read,
@@ -2572,7 +2648,7 @@ static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
        cpu_migrations_perf_counter_update(counter);
 }
 
-static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
+static const struct pmu perf_ops_cpu_migrations = {
        .enable         = cpu_migrations_perf_counter_enable,
        .disable        = cpu_migrations_perf_counter_disable,
        .read           = cpu_migrations_perf_counter_read,
@@ -2588,6 +2664,7 @@ void perf_tpcounter_event(int event_id)
 
        __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
 }
+EXPORT_SYMBOL_GPL(perf_tpcounter_event);
 
 extern int ftrace_profile_enable(int);
 extern void ftrace_profile_disable(int);
@@ -2597,8 +2674,7 @@ static void tp_perf_counter_destroy(struct perf_counter *counter)
        ftrace_profile_disable(perf_event_id(&counter->hw_event));
 }
 
-static const struct hw_perf_counter_ops *
-tp_perf_counter_init(struct perf_counter *counter)
+static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
        int event_id = perf_event_id(&counter->hw_event);
        int ret;
@@ -2613,18 +2689,16 @@ tp_perf_counter_init(struct perf_counter *counter)
        return &perf_ops_generic;
 }
 #else
-static const struct hw_perf_counter_ops *
-tp_perf_counter_init(struct perf_counter *counter)
+static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
        return NULL;
 }
 #endif
 
-static const struct hw_perf_counter_ops *
-sw_perf_counter_init(struct perf_counter *counter)
+static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 {
        struct perf_counter_hw_event *hw_event = &counter->hw_event;
-       const struct hw_perf_counter_ops *hw_ops = NULL;
+       const struct pmu *pmu = NULL;
        struct hw_perf_counter *hwc = &counter->hw;
 
        /*
@@ -2636,7 +2710,7 @@ sw_perf_counter_init(struct perf_counter *counter)
         */
        switch (perf_event_id(&counter->hw_event)) {
        case PERF_COUNT_CPU_CLOCK:
-               hw_ops = &perf_ops_cpu_clock;
+               pmu = &perf_ops_cpu_clock;
 
                if (hw_event->irq_period && hw_event->irq_period < 10000)
                        hw_event->irq_period = 10000;
@@ -2647,9 +2721,9 @@ sw_perf_counter_init(struct perf_counter *counter)
                 * use the cpu_clock counter instead.
                 */
                if (counter->ctx->task)
-                       hw_ops = &perf_ops_task_clock;
+                       pmu = &perf_ops_task_clock;
                else
-                       hw_ops = &perf_ops_cpu_clock;
+                       pmu = &perf_ops_cpu_clock;
 
                if (hw_event->irq_period && hw_event->irq_period < 10000)
                        hw_event->irq_period = 10000;
@@ -2658,18 +2732,18 @@ sw_perf_counter_init(struct perf_counter *counter)
        case PERF_COUNT_PAGE_FAULTS_MIN:
        case PERF_COUNT_PAGE_FAULTS_MAJ:
        case PERF_COUNT_CONTEXT_SWITCHES:
-               hw_ops = &perf_ops_generic;
+               pmu = &perf_ops_generic;
                break;
        case PERF_COUNT_CPU_MIGRATIONS:
                if (!counter->hw_event.exclude_kernel)
-                       hw_ops = &perf_ops_cpu_migrations;
+                       pmu = &perf_ops_cpu_migrations;
                break;
        }
 
-       if (hw_ops)
+       if (pmu)
                hwc->irq_period = hw_event->irq_period;
 
-       return hw_ops;
+       return pmu;
 }
 
 /*
@@ -2682,7 +2756,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
                   struct perf_counter *group_leader,
                   gfp_t gfpflags)
 {
-       const struct hw_perf_counter_ops *hw_ops;
+       const struct pmu *pmu;
        struct perf_counter *counter;
        long err;
 
@@ -2710,46 +2784,46 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
        counter->cpu                    = cpu;
        counter->hw_event               = *hw_event;
        counter->group_leader           = group_leader;
-       counter->hw_ops                 = NULL;
+       counter->pmu                    = NULL;
        counter->ctx                    = ctx;
 
        counter->state = PERF_COUNTER_STATE_INACTIVE;
        if (hw_event->disabled)
                counter->state = PERF_COUNTER_STATE_OFF;
 
-       hw_ops = NULL;
+       pmu = NULL;
 
        if (perf_event_raw(hw_event)) {
-               hw_ops = hw_perf_counter_init(counter);
+               pmu = hw_perf_counter_init(counter);
                goto done;
        }
 
        switch (perf_event_type(hw_event)) {
        case PERF_TYPE_HARDWARE:
-               hw_ops = hw_perf_counter_init(counter);
+               pmu = hw_perf_counter_init(counter);
                break;
 
        case PERF_TYPE_SOFTWARE:
-               hw_ops = sw_perf_counter_init(counter);
+               pmu = sw_perf_counter_init(counter);
                break;
 
        case PERF_TYPE_TRACEPOINT:
-               hw_ops = tp_perf_counter_init(counter);
+               pmu = tp_perf_counter_init(counter);
                break;
        }
 done:
        err = 0;
-       if (!hw_ops)
+       if (!pmu)
                err = -EINVAL;
-       else if (IS_ERR(hw_ops))
-               err = PTR_ERR(hw_ops);
+       else if (IS_ERR(pmu))
+               err = PTR_ERR(pmu);
 
        if (err) {
                kfree(counter);
                return ERR_PTR(err);
        }
 
-       counter->hw_ops = hw_ops;
+       counter->pmu = pmu;
 
        if (counter->hw_event.mmap)
                atomic_inc(&nr_mmap_tracking);
@@ -3139,9 +3213,9 @@ static void __cpuinit perf_counter_init_cpu(int cpu)
        cpuctx = &per_cpu(perf_cpu_context, cpu);
        __perf_counter_init_context(&cpuctx->ctx, NULL);
 
-       mutex_lock(&perf_resource_mutex);
+       spin_lock(&perf_resource_lock);
        cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
-       mutex_unlock(&perf_resource_mutex);
+       spin_unlock(&perf_resource_lock);
 
        hw_perf_counter_setup(cpu);
 }
@@ -3197,15 +3271,12 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
        .notifier_call          = perf_cpu_notify,
 };
 
-static int __init perf_counter_init(void)
+void __init perf_counter_init(void)
 {
        perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        register_cpu_notifier(&perf_cpu_nb);
-
-       return 0;
 }
-early_initcall(perf_counter_init);
 
 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
 {
@@ -3227,7 +3298,7 @@ perf_set_reserve_percpu(struct sysdev_class *class,
        if (val > perf_max_counters)
                return -EINVAL;
 
-       mutex_lock(&perf_resource_mutex);
+       spin_lock(&perf_resource_lock);
        perf_reserved_percpu = val;
        for_each_online_cpu(cpu) {
                cpuctx = &per_cpu(perf_cpu_context, cpu);
@@ -3237,7 +3308,7 @@ perf_set_reserve_percpu(struct sysdev_class *class,
                cpuctx->max_pertask = mpt;
                spin_unlock_irq(&cpuctx->ctx.lock);
        }
-       mutex_unlock(&perf_resource_mutex);
+       spin_unlock(&perf_resource_lock);
 
        return count;
 }
@@ -3259,9 +3330,9 @@ perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
        if (val > 1)
                return -EINVAL;
 
-       mutex_lock(&perf_resource_mutex);
+       spin_lock(&perf_resource_lock);
        perf_overcommit = val;
-       mutex_unlock(&perf_resource_mutex);
+       spin_unlock(&perf_resource_lock);
 
        return count;
 }