perf_counter: optimize perf_counter_task_tick()
author: Peter Zijlstra <a.p.zijlstra@chello.nl>
Fri, 8 May 2009 16:52:21 +0000 (18:52 +0200)
committer: Ingo Molnar <mingo@elte.hu>
Fri, 8 May 2009 18:36:57 +0000 (20:36 +0200)
perf_counter_task_tick() does way too much work to find out
there's nothing to do. Provide an easy short-circuit for the
normal case where there are no counters on the system.

[ Impact: micro-optimization ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090508170028.750619201@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/perf_counter.c

index 60e55f0b48f4b94b4ed4d3a8bb9f26935903f161..fdb0d2421276351b5885ce1173e9c5141cd453f1 100644 (file)
@@ -39,6 +39,7 @@ int perf_max_counters __read_mostly = 1;
 static int perf_reserved_percpu __read_mostly;
 static int perf_overcommit __read_mostly = 1;
 
+static atomic_t nr_counters __read_mostly;
 static atomic_t nr_mmap_tracking __read_mostly;
 static atomic_t nr_munmap_tracking __read_mostly;
 static atomic_t nr_comm_tracking __read_mostly;
@@ -1076,8 +1077,14 @@ static void rotate_ctx(struct perf_counter_context *ctx)
 
 void perf_counter_task_tick(struct task_struct *curr, int cpu)
 {
-       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
-       struct perf_counter_context *ctx = &curr->perf_counter_ctx;
+       struct perf_cpu_context *cpuctx;
+       struct perf_counter_context *ctx;
+
+       if (!atomic_read(&nr_counters))
+               return;
+
+       cpuctx = &per_cpu(perf_cpu_context, cpu);
+       ctx = &curr->perf_counter_ctx;
 
        perf_counter_cpu_sched_out(cpuctx);
        perf_counter_task_sched_out(curr, cpu);
@@ -1197,6 +1204,7 @@ static void free_counter(struct perf_counter *counter)
 {
        perf_pending_sync(counter);
 
+       atomic_dec(&nr_counters);
        if (counter->hw_event.mmap)
                atomic_dec(&nr_mmap_tracking);
        if (counter->hw_event.munmap)
@@ -2861,6 +2869,7 @@ done:
 
        counter->pmu = pmu;
 
+       atomic_inc(&nr_counters);
        if (counter->hw_event.mmap)
                atomic_inc(&nr_mmap_tracking);
        if (counter->hw_event.munmap)
This page took 0.075978 seconds and 5 git commands to generate.