perf/x86/intel: Streamline LBR MSR handling in PMI
author     Andi Kleen <ak@linux.intel.com>
           Fri, 20 Mar 2015 17:11:23 +0000 (10:11 -0700)
committer  Ingo Molnar <mingo@kernel.org>
           Thu, 2 Apr 2015 15:33:19 +0000 (17:33 +0200)
The perf PMI currently does unnecessary MSR accesses when
LBRs are enabled. We either use LBR freezing, or, in callstack
mode, force the LBRs to filter on ring 3 only; either way the
ring-0 PMI handler cannot pollute the recorded branches.

So there is no need to disable the LBRs explicitly in the
PMI handler.
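
For reference, here is a minimal user-space sketch of what LBR
freezing means at the MSR level. The MSR number and bit names
match the SDM and the kernel's msr-index.h; rdmsrl()/wrmsrl()
and fake_debugctl are illustrative stand-ins, not kernel code:

  #include <stdint.h>
  #include <stdio.h>

  #define MSR_IA32_DEBUGCTLMSR           0x1d9
  #define DEBUGCTLMSR_LBR                (1ULL <<  0)
  #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1ULL << 11)

  /* Stand-ins for the kernel's rdmsrl()/wrmsrl(), modelling a
   * single MSR so the flow can be traced in user space. */
  static uint64_t fake_debugctl;
  #define rdmsrl(msr, val) ((val) = fake_debugctl)
  #define wrmsrl(msr, val) (fake_debugctl = (val), \
          printf("write_msr: %x, value %llx\n", (msr), \
                 (unsigned long long)(val)))

  /*
   * With FREEZE_LBRS_ON_PMI set, the CPU clears DEBUGCTLMSR.LBR
   * by itself when a counter overflow raises the PMI, so the
   * handler never needs a read-modify-write of DEBUGCTLMSR just
   * to stop LBR recording.
   */
  static void lbr_enable(void)
  {
          uint64_t debugctl;

          rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
          debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
          wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
  }

  int main(void)
  {
          lbr_enable();
          return 0;
  }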

Also, we always rewrite LBR_SELECT in the LBR handler
unnecessarily, even though it can never change, as this
function_graph trace of the MSR accesses shows (note the
redundant LBR_SELECT and DEBUGCTLMSR writes):

 5)               |  /* write_msr: MSR_LBR_SELECT(1c8), value 0 */
 5)               |  /* read_msr: MSR_IA32_DEBUGCTLMSR(1d9), value 1801 */
 5)               |  /* write_msr: MSR_IA32_DEBUGCTLMSR(1d9), value 1801 */
 5)               |  /* write_msr: MSR_CORE_PERF_GLOBAL_CTRL(38f), value 70000000f */
 5)               |  /* write_msr: MSR_CORE_PERF_GLOBAL_CTRL(38f), value 0 */
 5)               |  /* write_msr: MSR_LBR_SELECT(1c8), value 0 */
 5)               |  /* read_msr: MSR_IA32_DEBUGCTLMSR(1d9), value 1801 */
 5)               |  /* write_msr: MSR_IA32_DEBUGCTLMSR(1d9), value 1801 */

This patch:

  - Avoids disabling already frozen LBRs unnecessarily in the PMI
  - Avoids changing LBR_SELECT in the PMI (sketched below)
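
A runnable sketch of what the new 'pmi' flag buys on the enable
path; have_lbr_sel, lbr_sel_config, and the wrmsrl() stub below
are hypothetical stand-ins for cpuc->lbr_sel and the real MSR
write, not the actual kernel code:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define MSR_LBR_SELECT 0x1c8

  static bool     have_lbr_sel   = true; /* models cpuc->lbr_sel != NULL */
  static uint64_t lbr_sel_config = 0;    /* models cpuc->lbr_sel->config */

  static void wrmsrl(uint32_t msr, uint64_t val)
  {
          printf("write_msr: %x, value %llx\n", msr,
                 (unsigned long long)val);
  }

  static void __intel_pmu_lbr_enable(bool pmi)
  {
          /*
           * LBR_SELECT was programmed when the event was scheduled
           * in and cannot have changed underneath a PMI, so skip
           * the rewrite in that case.
           */
          if (have_lbr_sel && !pmi)
                  wrmsrl(MSR_LBR_SELECT, lbr_sel_config);

          /* DEBUGCTL handling elided; see the diff below. */
  }

  int main(void)
  {
          __intel_pmu_lbr_enable(false); /* schedule-in: one write */
          __intel_pmu_lbr_enable(true);  /* PMI path: no MSR write */
          return 0;
  }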

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: eranian@google.com
Link: http://lkml.kernel.org/r/1426871484-21285-1-git-send-email-andi@firstfloor.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_lbr.c

diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 7250c0281f9db1d2af84de0fb8b9e113c0837368..329f0356ad4a0a8b969ae4d61d9b29a10d3b9522 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -870,7 +870,7 @@ void intel_pmu_lbr_enable(struct perf_event *event);
 
 void intel_pmu_lbr_disable(struct perf_event *event);
 
-void intel_pmu_lbr_enable_all(void);
+void intel_pmu_lbr_enable_all(bool pmi);
 
 void intel_pmu_lbr_disable_all(void);
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 59994602bb94361185e86ba5726408490e7662ee..9da2400c2ec37b7ea164e7a17f3bea68b172cbe6 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1244,7 +1244,10 @@ static __initconst const u64 slm_hw_cache_event_ids
  },
 };
 
-static void intel_pmu_disable_all(void)
+/*
+ * Use from PMIs where the LBRs are already disabled.
+ */
+static void __intel_pmu_disable_all(void)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
@@ -1256,15 +1259,20 @@ static void intel_pmu_disable_all(void)
                intel_bts_disable_local();
 
        intel_pmu_pebs_disable_all();
+}
+
+static void intel_pmu_disable_all(void)
+{
+       __intel_pmu_disable_all();
        intel_pmu_lbr_disable_all();
 }
 
-static void intel_pmu_enable_all(int added)
+static void __intel_pmu_enable_all(int added, bool pmi)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        intel_pmu_pebs_enable_all();
-       intel_pmu_lbr_enable_all();
+       intel_pmu_lbr_enable_all(pmi);
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
                        x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
 
@@ -1280,6 +1288,11 @@ static void intel_pmu_enable_all(int added)
                intel_bts_enable_local();
 }
 
+static void intel_pmu_enable_all(int added)
+{
+       __intel_pmu_enable_all(added, false);
+}
+
 /*
  * Workaround for:
  *   Intel Errata AAK100 (model 26)
@@ -1573,7 +1586,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
         */
        if (!x86_pmu.late_ack)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
-       intel_pmu_disable_all();
+       __intel_pmu_disable_all();
        handled = intel_pmu_drain_bts_buffer();
        handled += intel_bts_interrupt();
        status = intel_pmu_get_status();
@@ -1658,7 +1671,7 @@ again:
                goto again;
 
 done:
-       intel_pmu_enable_all(0);
+       __intel_pmu_enable_all(0, true);
        /*
         * Only unmask the NMI after the overflow counters
         * have been reset. This avoids spurious NMIs on
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 0473874109cb7a4551cbf114d15670af4a023037..3d537252f0119a96e55fee271b7cd9ec7ea48ffb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -132,12 +132,16 @@ static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
  * otherwise it becomes near impossible to get a reliable stack.
  */
 
-static void __intel_pmu_lbr_enable(void)
+static void __intel_pmu_lbr_enable(bool pmi)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        u64 debugctl, lbr_select = 0;
 
-       if (cpuc->lbr_sel) {
+       /*
+        * No need to reprogram LBR_SELECT in a PMI, as it
+        * did not change.
+        */
+       if (cpuc->lbr_sel && !pmi) {
                lbr_select = cpuc->lbr_sel->config;
                wrmsrl(MSR_LBR_SELECT, lbr_select);
        }
@@ -351,12 +355,12 @@ void intel_pmu_lbr_disable(struct perf_event *event)
        }
 }
 
-void intel_pmu_lbr_enable_all(void)
+void intel_pmu_lbr_enable_all(bool pmi)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        if (cpuc->lbr_users)
-               __intel_pmu_lbr_enable();
+               __intel_pmu_lbr_enable(pmi);
 }
 
 void intel_pmu_lbr_disable_all(void)