perf counters: consolidate hw_perf save/restore APIs
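
Replace the global hw_perf_disable_all()/hw_perf_enable_all() weak hooks
with hw_perf_save_disable()/hw_perf_restore(): the save routine returns an
opaque u64 control word which is passed back verbatim on restore, instead of
forcing a blind on/off toggle. Per-counter enable/disable/read now go through
a hw_perf_counter_ops vtable returned by hw_perf_counter_init(), which also
lets purely software counters (the cpu-clock counter below) supply their own
ops. Counters can be grouped: sys_perf_counter_open() gains a group_fd
parameter, siblings sit on their leader's sibling_list, and groups are
scheduled in and out as a unit.

A minimal sketch of the new save/disable/restore pattern, exactly as it is
used in the hunks below around the context list updates:

	u64 perf_flags;

	/*
	 * NMI-safe list update: globally disable the hardware counters,
	 * remember the previous control state, touch the list, then
	 * restore the saved state instead of unconditionally re-enabling
	 * everything.
	 */
	perf_flags = hw_perf_save_disable();
	list_add_counter(counter, ctx);
	hw_perf_restore(perf_flags);
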
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 20508f053658f86ce2dc818d975b36a0d2edf9b1..0e93fea17120c0a2b98da8dca8be4eba9c9ee7dc 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -10,6 +10,7 @@
 #include <linux/fs.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
+#include <linux/file.h>
 #include <linux/poll.h>
 #include <linux/sysfs.h>
 #include <linux/ptrace.h>
@@ -36,18 +37,15 @@ static DEFINE_MUTEX(perf_resource_mutex);
 /*
  * Architecture provided APIs - weak aliases:
  */
-
-int __weak hw_perf_counter_init(struct perf_counter *counter, u32 hw_event_type)
+extern __weak const struct hw_perf_counter_ops *
+hw_perf_counter_init(struct perf_counter *counter)
 {
-       return -EINVAL;
+       return ERR_PTR(-EINVAL);
 }
 
-void __weak hw_perf_counter_enable(struct perf_counter *counter)        { }
-void __weak hw_perf_counter_disable(struct perf_counter *counter)       { }
-void __weak hw_perf_counter_read(struct perf_counter *counter)          { }
-void __weak hw_perf_disable_all(void) { }
-void __weak hw_perf_enable_all(void) { }
-void __weak hw_perf_counter_setup(void) { }
+u64 __weak hw_perf_save_disable(void)          { return 0; }
+void __weak hw_perf_restore(u64 ctrl)  { }
+void __weak hw_perf_counter_setup(void)                { }
 
 #if BITS_PER_LONG == 64
 
@@ -55,18 +53,28 @@ void __weak hw_perf_counter_setup(void) { }
  * Read the cached counter value in 'counter', safe against cross CPU / NMI
  * modifications. 64 bit version - no complications.
  */
-static inline u64 perf_read_counter_safe(struct perf_counter *counter)
+static inline u64 perf_counter_read_safe(struct perf_counter *counter)
 {
        return (u64) atomic64_read(&counter->count);
 }
 
+void atomic64_counter_set(struct perf_counter *counter, u64 val)
+{
+       atomic64_set(&counter->count, val);
+}
+
+u64 atomic64_counter_read(struct perf_counter *counter)
+{
+       return atomic64_read(&counter->count);
+}
+
 #else
 
 /*
  * Read the cached counter value in 'counter', safe against cross CPU / NMI
  * modifications. 32 bit version.
  */
-static u64 perf_read_counter_safe(struct perf_counter *counter)
+static u64 perf_counter_read_safe(struct perf_counter *counter)
 {
        u32 cntl, cnth;
 
@@ -81,19 +89,73 @@ static u64 perf_read_counter_safe(struct perf_counter *counter)
        return cntl | ((u64) cnth) << 32;
 }
 
+void atomic64_counter_set(struct perf_counter *counter, u64 val64)
+{
+       u32 *val32 = (void *)&val64;
+
+       atomic_set(counter->count32 + 0, *(val32 + 0));
+       atomic_set(counter->count32 + 1, *(val32 + 1));
+}
+
+u64 atomic64_counter_read(struct perf_counter *counter)
+{
+       return atomic_read(counter->count32 + 0) |
+               (u64) atomic_read(counter->count32 + 1) << 32;
+}
+
 #endif
 
+static void
+list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
+{
+       struct perf_counter *group_leader = counter->group_leader;
+
+       /*
+        * Depending on whether it is a standalone or sibling counter,
+        * add it straight to the context's counter list, or to the group
+        * leader's sibling list:
+        */
+       if (counter->group_leader == counter)
+               list_add_tail(&counter->list_entry, &ctx->counter_list);
+       else
+               list_add_tail(&counter->list_entry, &group_leader->sibling_list);
+}
+
+static void
+list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
+{
+       struct perf_counter *sibling, *tmp;
+
+       list_del_init(&counter->list_entry);
+
+       /*
+        * If this was a group counter with sibling counters then
+        * upgrade the siblings to singleton counters by adding them
+        * to the context list directly:
+        */
+       list_for_each_entry_safe(sibling, tmp,
+                                &counter->sibling_list, list_entry) {
+
+               list_del_init(&sibling->list_entry);
+               list_add_tail(&sibling->list_entry, &ctx->counter_list);
+               WARN_ON_ONCE(!sibling->group_leader);
+               WARN_ON_ONCE(sibling->group_leader == sibling);
+               sibling->group_leader = sibling;
+       }
+}
+
 /*
  * Cross CPU call to remove a performance counter
  *
  * We disable the counter on the hardware level first. After that we
  * remove it from the context list.
  */
-static void __perf_remove_from_context(void *info)
+static void __perf_counter_remove_from_context(void *info)
 {
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter *counter = info;
        struct perf_counter_context *ctx = counter->ctx;
+       u64 perf_flags;
 
        /*
         * If this is a task context, we need to check whether it is
@@ -106,7 +168,7 @@ static void __perf_remove_from_context(void *info)
        spin_lock(&ctx->lock);
 
        if (counter->active) {
-               hw_perf_counter_disable(counter);
+               counter->hw_ops->hw_perf_counter_disable(counter);
                counter->active = 0;
                ctx->nr_active--;
                cpuctx->active_oncpu--;
@@ -118,9 +180,9 @@ static void __perf_remove_from_context(void *info)
         * Protect the list operation against NMI by disabling the
         * counters on a global level. NOP for non NMI based counters.
         */
-       hw_perf_disable_all();
-       list_del_init(&counter->list);
-       hw_perf_enable_all();
+       perf_flags = hw_perf_save_disable();
+       list_del_counter(counter, ctx);
+       hw_perf_restore(perf_flags);
 
        if (!ctx->task) {
                /*
@@ -144,7 +206,7 @@ static void __perf_remove_from_context(void *info)
  * CPU counters are removed with a smp call. For task counters we only
  * call when the task is on a CPU.
  */
-static void perf_remove_from_context(struct perf_counter *counter)
+static void perf_counter_remove_from_context(struct perf_counter *counter)
 {
        struct perf_counter_context *ctx = counter->ctx;
        struct task_struct *task = ctx->task;
@@ -155,32 +217,32 @@ static void perf_remove_from_context(struct perf_counter *counter)
                 * the removal is always successful.
                 */
                smp_call_function_single(counter->cpu,
-                                        __perf_remove_from_context,
+                                        __perf_counter_remove_from_context,
                                         counter, 1);
                return;
        }
 
 retry:
-       task_oncpu_function_call(task, __perf_remove_from_context,
+       task_oncpu_function_call(task, __perf_counter_remove_from_context,
                                 counter);
 
        spin_lock_irq(&ctx->lock);
        /*
         * If the context is active we need to retry the smp call.
         */
-       if (ctx->nr_active && !list_empty(&counter->list)) {
+       if (ctx->nr_active && !list_empty(&counter->list_entry)) {
                spin_unlock_irq(&ctx->lock);
                goto retry;
        }
 
        /*
         * The lock prevents that this context is scheduled in so we
-        * can remove the counter safely, if it the call above did not
+        * can remove the counter safely, if the call above did not
         * succeed.
         */
-       if (!list_empty(&counter->list)) {
+       if (!list_empty(&counter->list_entry)) {
                ctx->nr_counters--;
-               list_del_init(&counter->list);
+               list_del_counter(counter, ctx);
                counter->task = NULL;
        }
        spin_unlock_irq(&ctx->lock);
@@ -195,6 +257,7 @@ static void __perf_install_in_context(void *info)
        struct perf_counter *counter = info;
        struct perf_counter_context *ctx = counter->ctx;
        int cpu = smp_processor_id();
+       u64 perf_flags;
 
        /*
         * If this is a task context, we need to check whether it is
@@ -210,14 +273,14 @@ static void __perf_install_in_context(void *info)
         * Protect the list operation against NMI by disabling the
         * counters on a global level. NOP for non NMI based counters.
         */
-       hw_perf_disable_all();
-       list_add_tail(&counter->list, &ctx->counters);
-       hw_perf_enable_all();
+       perf_flags = hw_perf_save_disable();
+       list_add_counter(counter, ctx);
+       hw_perf_restore(perf_flags);
 
        ctx->nr_counters++;
 
        if (cpuctx->active_oncpu < perf_max_counters) {
-               hw_perf_counter_enable(counter);
+               counter->hw_ops->hw_perf_counter_enable(counter);
                counter->active = 1;
                counter->oncpu = cpu;
                ctx->nr_active++;
@@ -268,7 +331,7 @@ retry:
         * If the context is active and the counter has not been added
         * we need to retry the smp call.
         */
-       if (ctx->nr_active && list_empty(&counter->list)) {
+       if (ctx->nr_active && list_empty(&counter->list_entry)) {
                spin_unlock_irq(&ctx->lock);
                goto retry;
        }
@@ -278,13 +341,45 @@ retry:
         * can add the counter safely, if the call above did not
         * succeed.
         */
-       if (list_empty(&counter->list)) {
-               list_add_tail(&counter->list, &ctx->counters);
+       if (list_empty(&counter->list_entry)) {
+               list_add_counter(counter, ctx);
                ctx->nr_counters++;
        }
        spin_unlock_irq(&ctx->lock);
 }
 
+static void
+counter_sched_out(struct perf_counter *counter,
+                 struct perf_cpu_context *cpuctx,
+                 struct perf_counter_context *ctx)
+{
+       if (!counter->active)
+               return;
+
+       counter->hw_ops->hw_perf_counter_disable(counter);
+       counter->active =  0;
+       counter->oncpu  = -1;
+
+       cpuctx->active_oncpu--;
+       ctx->nr_active--;
+}
+
+static void
+group_sched_out(struct perf_counter *group_counter,
+               struct perf_cpu_context *cpuctx,
+               struct perf_counter_context *ctx)
+{
+       struct perf_counter *counter;
+
+       counter_sched_out(group_counter, cpuctx, ctx);
+
+       /*
+        * Schedule out siblings (if any):
+        */
+       list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
+               counter_sched_out(counter, cpuctx, ctx);
+}
+
 /*
  * Called from scheduler to remove the counters of the current task,
  * with interrupts disabled.
@@ -306,21 +401,45 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu)
                return;
 
        spin_lock(&ctx->lock);
-       list_for_each_entry(counter, &ctx->counters, list) {
-               if (!ctx->nr_active)
-                       break;
-               if (counter->active) {
-                       hw_perf_counter_disable(counter);
-                       counter->active = 0;
-                       counter->oncpu = -1;
-                       ctx->nr_active--;
-                       cpuctx->active_oncpu--;
-               }
+       if (ctx->nr_active) {
+               list_for_each_entry(counter, &ctx->counter_list, list_entry)
+                       group_sched_out(counter, cpuctx, ctx);
        }
        spin_unlock(&ctx->lock);
        cpuctx->task_ctx = NULL;
 }
 
+static void
+counter_sched_in(struct perf_counter *counter,
+                struct perf_cpu_context *cpuctx,
+                struct perf_counter_context *ctx,
+                int cpu)
+{
+       counter->hw_ops->hw_perf_counter_enable(counter);
+       counter->active = 1;
+       counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
+
+       cpuctx->active_oncpu++;
+       ctx->nr_active++;
+}
+
+static void
+group_sched_in(struct perf_counter *group_counter,
+              struct perf_cpu_context *cpuctx,
+              struct perf_counter_context *ctx,
+              int cpu)
+{
+       struct perf_counter *counter;
+
+       counter_sched_in(group_counter, cpuctx, ctx, cpu);
+
+       /*
+        * Schedule in siblings as one group (if any):
+        */
+       list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
+               counter_sched_in(counter, cpuctx, ctx, cpu);
+}
+
 /*
  * Called from scheduler to add the counters of the current task
  * with interrupts disabled.
@@ -342,19 +461,21 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu)
                return;
 
        spin_lock(&ctx->lock);
-       list_for_each_entry(counter, &ctx->counters, list) {
+       list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                if (ctx->nr_active == cpuctx->max_pertask)
                        break;
+
+               /*
+                * Listen to the 'cpu' scheduling filter constraint
+                * of counters:
+                */
                if (counter->cpu != -1 && counter->cpu != cpu)
                        continue;
 
-               hw_perf_counter_enable(counter);
-               counter->active = 1;
-               counter->oncpu = cpu;
-               ctx->nr_active++;
-               cpuctx->active_oncpu++;
+               group_sched_in(counter, cpuctx, ctx, cpu);
        }
        spin_unlock(&ctx->lock);
+
        cpuctx->task_ctx = ctx;
 }
 
@@ -362,6 +483,7 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 {
        struct perf_counter_context *ctx = &curr->perf_counter_ctx;
        struct perf_counter *counter;
+       u64 perf_flags;
 
        if (likely(!ctx->nr_counters))
                return;
@@ -371,32 +493,39 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
        spin_lock(&ctx->lock);
 
        /*
-        * Rotate the first entry last:
+        * Rotate the first entry last (works just fine for group counters too):
         */
-       hw_perf_disable_all();
-       list_for_each_entry(counter, &ctx->counters, list) {
-               list_del(&counter->list);
-               list_add_tail(&counter->list, &ctx->counters);
+       perf_flags = hw_perf_save_disable();
+       list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+               list_del(&counter->list_entry);
+               list_add_tail(&counter->list_entry, &ctx->counter_list);
                break;
        }
-       hw_perf_enable_all();
+       hw_perf_restore(perf_flags);
 
        spin_unlock(&ctx->lock);
 
        perf_counter_task_sched_in(curr, cpu);
 }
 
+/*
+ * Initialize the perf_counter context in a task_struct:
+ */
+static void
+__perf_counter_init_context(struct perf_counter_context *ctx,
+                           struct task_struct *task)
+{
+       spin_lock_init(&ctx->lock);
+       INIT_LIST_HEAD(&ctx->counter_list);
+       ctx->nr_counters        = 0;
+       ctx->task               = task;
+}
 /*
  * Initialize the perf_counter context in task_struct
  */
 void perf_counter_init_task(struct task_struct *task)
 {
-       struct perf_counter_context *ctx = &task->perf_counter_ctx;
-
-       spin_lock_init(&ctx->lock);
-       INIT_LIST_HEAD(&ctx->counters);
-       ctx->nr_counters = 0;
-       ctx->task = task;
+       __perf_counter_init_context(&task->perf_counter_ctx, task);
 }
 
 /*
@@ -404,10 +533,12 @@ void perf_counter_init_task(struct task_struct *task)
  */
 static void __hw_perf_counter_read(void *info)
 {
-       hw_perf_counter_read(info);
+       struct perf_counter *counter = info;
+
+       counter->hw_ops->hw_perf_counter_read(counter);
 }
 
-static u64 perf_read_counter(struct perf_counter *counter)
+static u64 perf_counter_read(struct perf_counter *counter)
 {
        /*
         * If counter is enabled and currently active on a CPU, update the
@@ -418,7 +549,7 @@ static u64 perf_read_counter(struct perf_counter *counter)
                                         __hw_perf_counter_read, counter, 1);
        }
 
-       return perf_read_counter_safe(counter);
+       return perf_counter_read_safe(counter);
 }
 
 /*
@@ -555,7 +686,7 @@ static int perf_release(struct inode *inode, struct file *file)
 
        mutex_lock(&counter->mutex);
 
-       perf_remove_from_context(counter);
+       perf_counter_remove_from_context(counter);
        put_context(ctx);
 
        mutex_unlock(&counter->mutex);
@@ -577,7 +708,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
                return -EINVAL;
 
        mutex_lock(&counter->mutex);
-       cntval = perf_read_counter(counter);
+       cntval = perf_counter_read(counter);
        mutex_unlock(&counter->mutex);
 
        return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
@@ -669,7 +800,7 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
        struct perf_counter *counter = file->private_data;
 
-       switch (counter->record_type) {
+       switch (counter->hw_event.record_type) {
        case PERF_RECORD_SIMPLE:
                return perf_read_hw(counter, buf, count);
 
@@ -703,113 +834,212 @@ static const struct file_operations perf_fops = {
        .poll                   = perf_poll,
 };
 
+static void cpu_clock_perf_counter_enable(struct perf_counter *counter)
+{
+}
+
+static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
+{
+}
+
+static void cpu_clock_perf_counter_read(struct perf_counter *counter)
+{
+       int cpu = raw_smp_processor_id();
+
+       atomic64_counter_set(counter, cpu_clock(cpu));
+}
+
+static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
+       .hw_perf_counter_enable         = cpu_clock_perf_counter_enable,
+       .hw_perf_counter_disable        = cpu_clock_perf_counter_disable,
+       .hw_perf_counter_read           = cpu_clock_perf_counter_read,
+};
+
+static const struct hw_perf_counter_ops *
+sw_perf_counter_init(struct perf_counter *counter)
+{
+       const struct hw_perf_counter_ops *hw_ops = NULL;
+
+       switch (counter->hw_event.type) {
+       case PERF_COUNT_CPU_CLOCK:
+               hw_ops = &perf_ops_cpu_clock;
+               break;
+       default:
+               break;
+       }
+       return hw_ops;
+}
+
 /*
  * Allocate and initialize a counter structure
  */
 static struct perf_counter *
-perf_counter_alloc(u32 hw_event_period, int cpu, u32 record_type)
+perf_counter_alloc(struct perf_counter_hw_event *hw_event,
+                  int cpu,
+                  struct perf_counter *group_leader)
 {
-       struct perf_counter *counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+       const struct hw_perf_counter_ops *hw_ops;
+       struct perf_counter *counter;
 
+       counter = kzalloc(sizeof(*counter), GFP_KERNEL);
        if (!counter)
                return NULL;
 
+       /*
+        * Single counters are their own group leaders, with an
+        * empty sibling list:
+        */
+       if (!group_leader)
+               group_leader = counter;
+
        mutex_init(&counter->mutex);
-       INIT_LIST_HEAD(&counter->list);
+       INIT_LIST_HEAD(&counter->list_entry);
+       INIT_LIST_HEAD(&counter->sibling_list);
        init_waitqueue_head(&counter->waitq);
 
-       counter->irqdata        = &counter->data[0];
-       counter->usrdata        = &counter->data[1];
-       counter->cpu            = cpu;
-       counter->record_type    = record_type;
-       counter->__irq_period   = hw_event_period;
-       counter->wakeup_pending = 0;
+       counter->irqdata                = &counter->data[0];
+       counter->usrdata                = &counter->data[1];
+       counter->cpu                    = cpu;
+       counter->hw_event               = *hw_event;
+       counter->wakeup_pending         = 0;
+       counter->group_leader           = group_leader;
+       counter->hw_ops                 = NULL;
+
+       hw_ops = NULL;
+       if (!hw_event->raw && hw_event->type < 0)
+               hw_ops = sw_perf_counter_init(counter);
+       if (!hw_ops) {
+               hw_ops = hw_perf_counter_init(counter);
+       }
+
+       if (!hw_ops) {
+               kfree(counter);
+               return NULL;
+       }
+       counter->hw_ops = hw_ops;
 
        return counter;
 }
 
 /**
- * sys_perf_task_open - open a performance counter associate it to a task
- * @hw_event_type:     event type for monitoring/sampling...
+ * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
+ *
+ * @hw_event_uptr:     event type attributes for monitoring/sampling
  * @pid:               target pid
+ * @cpu:               target cpu
+ * @group_fd:          group leader counter fd
  */
-asmlinkage int
-sys_perf_counter_open(u32 hw_event_type,
-                     u32 hw_event_period,
-                     u32 record_type,
-                     pid_t pid,
-                     int cpu)
+asmlinkage int sys_perf_counter_open(
+
+       struct perf_counter_hw_event    __user *hw_event_uptr,
+       pid_t                           pid,
+       int                             cpu,
+       int                             group_fd)
+
 {
+       struct perf_counter *counter, *group_leader;
+       struct perf_counter_hw_event hw_event;
        struct perf_counter_context *ctx;
-       struct perf_counter *counter;
+       struct file *group_file = NULL;
+       int fput_needed = 0;
        int ret;
 
+       if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
+               return -EFAULT;
+
+       /*
+        * Get the target context (task or percpu):
+        */
        ctx = find_get_context(pid, cpu);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
-       ret = -ENOMEM;
-       counter = perf_counter_alloc(hw_event_period, cpu, record_type);
+       /*
+        * Look up the group leader (we will attach this counter to it):
+        */
+       group_leader = NULL;
+       if (group_fd != -1) {
+               ret = -EINVAL;
+               group_file = fget_light(group_fd, &fput_needed);
+               if (!group_file)
+                       goto err_put_context;
+               if (group_file->f_op != &perf_fops)
+                       goto err_put_context;
+
+               group_leader = group_file->private_data;
+               /*
+                * Do not allow a recursive hierarchy (this new sibling
+                * becoming part of another group-sibling):
+                */
+               if (group_leader->group_leader != group_leader)
+                       goto err_put_context;
+               /*
+                * Do not allow to attach to a group in a different
+                * task or CPU context:
+                */
+               if (group_leader->ctx != ctx)
+                       goto err_put_context;
+       }
+
+       ret = -EINVAL;
+       counter = perf_counter_alloc(&hw_event, cpu, group_leader);
        if (!counter)
                goto err_put_context;
 
-       ret = hw_perf_counter_init(counter, hw_event_type);
-       if (ret)
-               goto err_free_put_context;
-
        perf_install_in_context(ctx, counter, cpu);
 
        ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
        if (ret < 0)
                goto err_remove_free_put_context;
 
+out_fput:
+       fput_light(group_file, fput_needed);
+
        return ret;
 
 err_remove_free_put_context:
        mutex_lock(&counter->mutex);
-       perf_remove_from_context(counter);
+       perf_counter_remove_from_context(counter);
        mutex_unlock(&counter->mutex);
-
-err_free_put_context:
        kfree(counter);
 
 err_put_context:
        put_context(ctx);
 
-       return ret;
+       goto out_fput;
 }
 
-static void __cpuinit perf_init_cpu(int cpu)
+static void __cpuinit perf_counter_init_cpu(int cpu)
 {
-       struct perf_cpu_context *ctx;
+       struct perf_cpu_context *cpuctx;
 
-       ctx = &per_cpu(perf_cpu_context, cpu);
-       spin_lock_init(&ctx->ctx.lock);
-       INIT_LIST_HEAD(&ctx->ctx.counters);
+       cpuctx = &per_cpu(perf_cpu_context, cpu);
+       __perf_counter_init_context(&cpuctx->ctx, NULL);
 
        mutex_lock(&perf_resource_mutex);
-       ctx->max_pertask = perf_max_counters - perf_reserved_percpu;
+       cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
        mutex_unlock(&perf_resource_mutex);
+
        hw_perf_counter_setup();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void __perf_exit_cpu(void *info)
+static void __perf_counter_exit_cpu(void *info)
 {
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter_context *ctx = &cpuctx->ctx;
        struct perf_counter *counter, *tmp;
 
-       list_for_each_entry_safe(counter, tmp, &ctx->counters, list)
-               __perf_remove_from_context(counter);
+       list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
+               __perf_counter_remove_from_context(counter);
 
 }
-static void perf_exit_cpu(int cpu)
+static void perf_counter_exit_cpu(int cpu)
 {
-       smp_call_function_single(cpu, __perf_exit_cpu, NULL, 1);
+       smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
 }
 #else
-static inline void perf_exit_cpu(int cpu) { }
+static inline void perf_counter_exit_cpu(int cpu) { }
 #endif
 
 static int __cpuinit
@@ -821,12 +1051,12 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               perf_init_cpu(cpu);
+               perf_counter_init_cpu(cpu);
                break;
 
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
-               perf_exit_cpu(cpu);
+               perf_counter_exit_cpu(cpu);
                break;
 
        default:
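
Usage note (an illustrative sketch only: the userspace declarations, the
pid/cpu conventions and the values behind PERF_COUNT_CPU_CLOCK and
PERF_RECORD_SIMPLE are assumptions, not part of this change). With the new
group_fd parameter a counter either becomes its own group leader or joins an
existing group led by another counter fd:

	struct perf_counter_hw_event hw_event = {
		.type		= PERF_COUNT_CPU_CLOCK,	/* software counter, see sw_perf_counter_init() */
		.record_type	= PERF_RECORD_SIMPLE,
	};
	int leader, sibling;

	/* group_fd == -1: the new counter becomes its own group leader */
	leader  = sys_perf_counter_open(&hw_event, /* pid */ 0, /* cpu */ -1, /* group_fd */ -1);

	/* a second counter attached to the leader's group */
	sibling = sys_perf_counter_open(&hw_event, /* pid */ 0, /* cpu */ -1, /* group_fd */ leader);

The open fails with -EINVAL if group_fd does not refer to a perf counter fd,
if the leader is itself a sibling, or if the leader lives in a different
task/CPU context (see the checks in sys_perf_counter_open() above).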