perf_counter, x86: introduce max_period variable
author     Robert Richter <robert.richter@amd.com>
           Wed, 29 Apr 2009 10:47:23 +0000 (12:47 +0200)
committer  Ingo Molnar <mingo@elte.hu>
           Wed, 29 Apr 2009 12:51:13 +0000 (14:51 +0200)
The allowed counter period to program differs between x86 PMUs. This
introduces a max_period value and allows the generic implementation
to check the maximum period for all models.

[ Impact: generalize code ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-27-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
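
As a rough illustration of the clamp the generic path now applies, here is a
standalone sketch (not kernel code; the struct and helper names are invented
for this example, only the max_period values and the comparison mirror the
patch):

#include <stdint.h>
#include <stdio.h>

struct pmu_limits {
	uint64_t max_period;
};

/* Clamp a requested sampling period the way __hw_perf_counter_init()
 * now does: non-positive or over-limit periods fall back to the
 * model-specific maximum. */
static uint64_t clamp_period(int64_t requested, const struct pmu_limits *pmu)
{
	if (requested <= 0 || (uint64_t)requested > pmu->max_period)
		return pmu->max_period;
	return (uint64_t)requested;
}

int main(void)
{
	struct pmu_limits intel = { .max_period = (1ULL << 31) - 1 };
	struct pmu_limits amd   = { .max_period = (1ULL << 47) - 1 };

	/* A 2^40 period is too wide for Intel PMCs but fine on AMD. */
	printf("intel: %llx\n", (unsigned long long)clamp_period(1LL << 40, &intel));
	printf("amd:   %llx\n", (unsigned long long)clamp_period(1LL << 40, &amd));
	return 0;
}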
arch/x86/kernel/cpu/perf_counter.c

index a8a53abd706d4944522f5dc2d3aa92bd22b4e99f..4b8715b34f87911a7618f58c109fec9322ea71b7 100644
@@ -54,6 +54,7 @@ struct x86_pmu {
        int             num_counters_fixed;
        int             counter_bits;
        u64             counter_mask;
+       u64             max_period;
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -279,14 +280,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
                hwc->nmi = 1;
 
        hwc->irq_period         = hw_event->irq_period;
-       /*
-        * Intel PMCs cannot be accessed sanely above 32 bit width,
-        * so we install an artificial 1<<31 period regardless of
-        * the generic counter period:
-        */
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-               if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
-                       hwc->irq_period = 0x7FFFFFFF;
+       if ((s64)hwc->irq_period <= 0 || hwc->irq_period > x86_pmu.max_period)
+               hwc->irq_period = x86_pmu.max_period;
 
        atomic64_set(&hwc->period_left, hwc->irq_period);
 
@@ -910,6 +905,12 @@ static struct x86_pmu intel_pmu = {
        .event_map              = intel_pmu_event_map,
        .raw_event              = intel_pmu_raw_event,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
+       /*
+        * Intel PMCs cannot be accessed sanely above 32 bit width,
+        * so we install an artificial 1<<31 period regardless of
+        * the generic counter period:
+        */
+       .max_period             = (1ULL << 31) - 1,
 };
 
 static struct x86_pmu amd_pmu = {
@@ -927,6 +928,8 @@ static struct x86_pmu amd_pmu = {
        .num_counters           = 4,
        .counter_bits           = 48,
        .counter_mask           = (1ULL << 48) - 1,
+       /* use highest bit to detect overflow */
+       .max_period             = (1ULL << 47) - 1,
 };
 
 static int intel_pmu_init(void)
@@ -999,6 +1002,7 @@ void __init init_hw_perf_counters(void)
        perf_max_counters = x86_pmu.num_counters;
 
        pr_info("... value mask:      %016Lx\n", x86_pmu.counter_mask);
+       pr_info("... max period:      %016Lx\n", x86_pmu.max_period);
 
        if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
                x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
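
For the AMD value above, keeping the period below 1<<47 leaves bit 47 of the
48-bit counter free: the counter is preloaded with the negated period masked
to counter width, so the preload always has the top bit set, and a clear top
bit later signals an overflow. A standalone sketch of that arithmetic
(illustrative only, assuming the preload scheme used by the in-tree
x86_perf_counter_set_period()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t counter_mask = (1ULL << 48) - 1;  /* 48-bit counter */
	const uint64_t max_period   = (1ULL << 47) - 1;

	/* Preload = -period, truncated to counter width. */
	uint64_t preload = (0 - max_period) & counter_mask;

	/* Bit 47 is set in the preload for any period <= max_period,
	 * so observing bit 47 == 0 later means the counter wrapped. */
	printf("preload = %012llx, top bit = %llu\n",
	       (unsigned long long)preload,
	       (unsigned long long)((preload >> 47) & 1));
	return 0;
}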