drm/i915/vlv: fixup DDR freq detection per Punit spec
[deliverable/linux.git] / drivers / gpu / drm / i915 / intel_pm.c
index 3d3f1134f16bd84597c0ae2a9e759cbfa5afacde..a5778e59cc155ecb0767eaf4c02ded8d0796b1f5 100644 (file)
 #include <linux/module.h>
 #include <drm/i915_powerwell.h>
 
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage.  This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPU, which differentiate
 * among each other with the latency required to enter and leave RC6 and
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags define which states GPU is allowed
 * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is deepest RC6. Their support by hardware varies according to the
 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
 * which brings the most power savings; deeper states save more power, but
 * require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE                       (1<<0)
#define INTEL_RC6p_ENABLE                      (1<<1)
#define INTEL_RC6pp_ENABLE                     (1<<2)
 /* FBC, or Frame Buffer Compression, is a technique employed to compress the
  * framebuffer contents in-memory, aiming at reducing the required bandwidth
  * during in-memory transfers and, therefore, reduce the power packet.
@@ -233,18 +254,6 @@ static void ironlake_disable_fbc(struct drm_device *dev)
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
 
-               if (IS_IVYBRIDGE(dev))
-                       /* WaFbcDisableDpfcClockGating:ivb */
-                       I915_WRITE(ILK_DSPCLK_GATE_D,
-                                  I915_READ(ILK_DSPCLK_GATE_D) &
-                                  ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
-
-               if (IS_HASWELL(dev))
-                       /* WaFbcDisableDpfcClockGating:hsw */
-                       I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
-                                  I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
-                                  ~HSW_DPFC_GATING_DISABLE);
-
                DRM_DEBUG_KMS("disabled FBC\n");
        }
 }
@@ -274,18 +283,10 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        if (IS_IVYBRIDGE(dev)) {
                /* WaFbcAsynchFlipDisableFbcQueue:ivb */
                I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
-               /* WaFbcDisableDpfcClockGating:ivb */
-               I915_WRITE(ILK_DSPCLK_GATE_D,
-                          I915_READ(ILK_DSPCLK_GATE_D) |
-                          ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
        } else {
                /* WaFbcAsynchFlipDisableFbcQueue:hsw */
                I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
                           HSW_BYPASS_FBC_QUEUE);
-               /* WaFbcDisableDpfcClockGating:hsw */
-               I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
-                          I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
-                          HSW_DPFC_GATING_DISABLE);
        }
 
        I915_WRITE(SNB_DPFC_CTL_SA,
@@ -2200,20 +2201,11 @@ struct hsw_wm_maximums {
        uint16_t fbc;
 };
 
-struct hsw_wm_values {
-       uint32_t wm_pipe[3];
-       uint32_t wm_lp[3];
-       uint32_t wm_lp_spr[3];
-       uint32_t wm_linetime[3];
-       bool enable_fbc_wm;
-};
-
 /* used in computing the new watermarks state */
 struct intel_wm_config {
        unsigned int num_pipes_active;
        bool sprites_enabled;
        bool sprites_scaled;
-       bool fbc_wm_enabled;
 };
 
 /*
@@ -2380,11 +2372,11 @@ static unsigned int ilk_fbc_wm_max(void)
        return 15;
 }
 
-static void ilk_wm_max(struct drm_device *dev,
-                      int level,
-                      const struct intel_wm_config *config,
-                      enum intel_ddb_partitioning ddb_partitioning,
-                      struct hsw_wm_maximums *max)
+static void ilk_compute_wm_maximums(struct drm_device *dev,
+                                   int level,
+                                   const struct intel_wm_config *config,
+                                   enum intel_ddb_partitioning ddb_partitioning,
+                                   struct hsw_wm_maximums *max)
 {
        max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
        max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
@@ -2392,9 +2384,9 @@ static void ilk_wm_max(struct drm_device *dev,
        max->fbc = ilk_fbc_wm_max();
 }
 
-static bool ilk_check_wm(int level,
-                        const struct hsw_wm_maximums *max,
-                        struct intel_wm_level *result)
+static bool ilk_validate_wm_level(int level,
+                                 const struct hsw_wm_maximums *max,
+                                 struct intel_wm_level *result)
 {
        bool ret;
 
@@ -2430,8 +2422,6 @@ static bool ilk_check_wm(int level,
                result->enable = true;
        }
 
-       DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
-
        return ret;
 }
 
@@ -2637,7 +2627,7 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
        struct hsw_wm_maximums max;
 
        /* LP0 watermarks always use 1/2 DDB partitioning */
-       ilk_wm_max(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+       ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
 
        for (level = 0; level <= max_level; level++)
                ilk_compute_wm_level(dev_priv, level, params,
@@ -2646,7 +2636,7 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
        pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
 
        /* At least LP0 must be valid */
-       return ilk_check_wm(0, &max, &pipe_wm->wm[0]);
+       return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
 }
 
 /*
@@ -2691,7 +2681,7 @@ static void ilk_wm_merge(struct drm_device *dev,
 
                ilk_merge_wm_level(dev, level, wm);
 
-               if (!ilk_check_wm(level, max, wm))
+               if (!ilk_validate_wm_level(level, max, wm))
                        break;
 
                /*
@@ -2705,20 +2695,28 @@ static void ilk_wm_merge(struct drm_device *dev,
        }
 }
 
+static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
+{
+       /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
+       return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
+}
+
 static void hsw_compute_wm_results(struct drm_device *dev,
                                   const struct intel_pipe_wm *merged,
+                                  enum intel_ddb_partitioning partitioning,
                                   struct hsw_wm_values *results)
 {
        struct intel_crtc *intel_crtc;
        int level, wm_lp;
 
        results->enable_fbc_wm = merged->fbc_wm_enabled;
+       results->partitioning = partitioning;
 
        /* LP1+ register values */
        for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
                const struct intel_wm_level *r;
 
-               level = wm_lp + (wm_lp >= 2 && merged->wm[4].enable);
+               level = ilk_wm_lp_to_level(wm_lp, merged);
 
                r = &merged->wm[level];
                if (!r->enable)
@@ -2777,80 +2775,112 @@ static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
        }
 }
 
/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))		/* WM0 for the given pipe */
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))	/* linetime for the given pipe */
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))	/* LP1/LP2/LP3 (wm_lp is 1..3) */
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)				/* FBC watermark enable bit */
#define WM_DIRTY_DDB (1 << 25)				/* DDB 1/2 vs 5/6 partitioning */
+
+static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
+                                        const struct hsw_wm_values *old,
+                                        const struct hsw_wm_values *new)
+{
+       unsigned int dirty = 0;
+       enum pipe pipe;
+       int wm_lp;
+
+       for_each_pipe(pipe) {
+               if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
+                       dirty |= WM_DIRTY_LINETIME(pipe);
+                       /* Must disable LP1+ watermarks too */
+                       dirty |= WM_DIRTY_LP_ALL;
+               }
+
+               if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
+                       dirty |= WM_DIRTY_PIPE(pipe);
+                       /* Must disable LP1+ watermarks too */
+                       dirty |= WM_DIRTY_LP_ALL;
+               }
+       }
+
+       if (old->enable_fbc_wm != new->enable_fbc_wm) {
+               dirty |= WM_DIRTY_FBC;
+               /* Must disable LP1+ watermarks too */
+               dirty |= WM_DIRTY_LP_ALL;
+       }
+
+       if (old->partitioning != new->partitioning) {
+               dirty |= WM_DIRTY_DDB;
+               /* Must disable LP1+ watermarks too */
+               dirty |= WM_DIRTY_LP_ALL;
+       }
+
+       /* LP1+ watermarks already deemed dirty, no need to continue */
+       if (dirty & WM_DIRTY_LP_ALL)
+               return dirty;
+
+       /* Find the lowest numbered LP1+ watermark in need of an update... */
+       for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+               if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
+                   old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
+                       break;
+       }
+
+       /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
+       for (; wm_lp <= 3; wm_lp++)
+               dirty |= WM_DIRTY_LP(wm_lp);
+
+       return dirty;
+}
+
 /*
  * The spec says we shouldn't write when we don't need, because every write
  * causes WMs to be re-evaluated, expending some power.
  */
 static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
-                               struct hsw_wm_values *results,
-                               enum intel_ddb_partitioning partitioning)
+                               struct hsw_wm_values *results)
 {
-       struct hsw_wm_values previous;
+       struct hsw_wm_values *previous = &dev_priv->wm.hw;
+       unsigned int dirty;
        uint32_t val;
-       enum intel_ddb_partitioning prev_partitioning;
-       bool prev_enable_fbc_wm;
-
-       previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
-       previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
-       previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
-       previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
-       previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
-       previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
-       previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
-       previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
-       previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
-       previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
-       previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
-       previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
-
-       prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
-                               INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-
-       prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
-
-       if (memcmp(results->wm_pipe, previous.wm_pipe,
-                  sizeof(results->wm_pipe)) == 0 &&
-           memcmp(results->wm_lp, previous.wm_lp,
-                  sizeof(results->wm_lp)) == 0 &&
-           memcmp(results->wm_lp_spr, previous.wm_lp_spr,
-                  sizeof(results->wm_lp_spr)) == 0 &&
-           memcmp(results->wm_linetime, previous.wm_linetime,
-                  sizeof(results->wm_linetime)) == 0 &&
-           partitioning == prev_partitioning &&
-           results->enable_fbc_wm == prev_enable_fbc_wm)
+
+       dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results);
+       if (!dirty)
                return;
 
-       if (previous.wm_lp[2] != 0)
+       if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0)
                I915_WRITE(WM3_LP_ILK, 0);
-       if (previous.wm_lp[1] != 0)
+       if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
                I915_WRITE(WM2_LP_ILK, 0);
-       if (previous.wm_lp[0] != 0)
+       if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
                I915_WRITE(WM1_LP_ILK, 0);
 
-       if (previous.wm_pipe[0] != results->wm_pipe[0])
+       if (dirty & WM_DIRTY_PIPE(PIPE_A))
                I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
-       if (previous.wm_pipe[1] != results->wm_pipe[1])
+       if (dirty & WM_DIRTY_PIPE(PIPE_B))
                I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
-       if (previous.wm_pipe[2] != results->wm_pipe[2])
+       if (dirty & WM_DIRTY_PIPE(PIPE_C))
                I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
 
-       if (previous.wm_linetime[0] != results->wm_linetime[0])
+       if (dirty & WM_DIRTY_LINETIME(PIPE_A))
                I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
-       if (previous.wm_linetime[1] != results->wm_linetime[1])
+       if (dirty & WM_DIRTY_LINETIME(PIPE_B))
                I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
-       if (previous.wm_linetime[2] != results->wm_linetime[2])
+       if (dirty & WM_DIRTY_LINETIME(PIPE_C))
                I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
 
-       if (prev_partitioning != partitioning) {
+       if (dirty & WM_DIRTY_DDB) {
                val = I915_READ(WM_MISC);
-               if (partitioning == INTEL_DDB_PART_1_2)
+               if (results->partitioning == INTEL_DDB_PART_1_2)
                        val &= ~WM_MISC_DATA_PARTITION_5_6;
                else
                        val |= WM_MISC_DATA_PARTITION_5_6;
                I915_WRITE(WM_MISC, val);
        }
 
-       if (prev_enable_fbc_wm != results->enable_fbc_wm) {
+       if (dirty & WM_DIRTY_FBC) {
                val = I915_READ(DISP_ARB_CTL);
                if (results->enable_fbc_wm)
                        val &= ~DISP_FBC_WM_DIS;
@@ -2859,19 +2889,21 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
                I915_WRITE(DISP_ARB_CTL, val);
        }
 
-       if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
+       if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0])
                I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
-       if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
+       if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
                I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
-       if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
+       if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
                I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
 
-       if (results->wm_lp[0] != 0)
+       if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0)
                I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
-       if (results->wm_lp[1] != 0)
+       if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0)
                I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
-       if (results->wm_lp[2] != 0)
+       if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0)
                I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+
+       dev_priv->wm.hw = *results;
 }
 
 static void haswell_update_wm(struct drm_crtc *crtc)
@@ -2896,12 +2928,13 @@ static void haswell_update_wm(struct drm_crtc *crtc)
 
        intel_crtc->wm.active = pipe_wm;
 
-       ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
+       ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
        ilk_wm_merge(dev, &max, &lp_wm_1_2);
 
        /* 5/6 split only in single pipe config on IVB+ */
-       if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1) {
-               ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
+       if (INTEL_INFO(dev)->gen >= 7 &&
+           config.num_pipes_active == 1 && config.sprites_enabled) {
+               ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
                ilk_wm_merge(dev, &max, &lp_wm_5_6);
 
                best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
@@ -2909,12 +2942,12 @@ static void haswell_update_wm(struct drm_crtc *crtc)
                best_lp_wm = &lp_wm_1_2;
        }
 
-       hsw_compute_wm_results(dev, best_lp_wm, &results);
-
        partitioning = (best_lp_wm == &lp_wm_1_2) ?
                       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
 
-       hsw_write_wm_values(dev_priv, &results, partitioning);
+       hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+
+       hsw_write_wm_values(dev_priv, &results);
 }
 
 static void haswell_update_sprite_wm(struct drm_plane *plane,
@@ -3094,6 +3127,74 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane,
        I915_WRITE(WM3S_LP_IVB, sprite_wm);
 }
 
/*
 * Read back the current watermark state for one pipe from the hardware,
 * storing the raw register values in dev_priv->wm.hw and a best-effort
 * reconstruction in the crtc's active watermark state.
 */
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct hsw_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_pipe_wm *active = &intel_crtc->wm.active;
	enum pipe pipe = intel_crtc->pipe;
	/* WM0 registers are not contiguous, so index them by pipe explicitly */
	static const unsigned int wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	if (intel_crtc_active(crtc)) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermaks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}
}
+
+void ilk_wm_get_hw_state(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct hsw_wm_values *hw = &dev_priv->wm.hw;
+       struct drm_crtc *crtc;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               ilk_pipe_wm_get_hw_state(crtc);
+
+       hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
+       hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
+       hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
+
+       hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+       hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
+       hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+
+       hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+               INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+
+       hw->enable_fbc_wm =
+               !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+}
+
 /**
  * intel_update_watermarks - update FIFO watermark values based on current modes
  *
@@ -3585,6 +3686,20 @@ static void valleyview_disable_rps(struct drm_device *dev)
        }
 }
 
+static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+{
+       if (IS_GEN6(dev))
+               DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+
+       if (IS_HASWELL(dev))
+               DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+
+       DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+                       (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+                       (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+                       (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+}
+
 int intel_enable_rc6(const struct drm_device *dev)
 {
        /* No RC6 before Ironlake */
@@ -3599,18 +3714,13 @@ int intel_enable_rc6(const struct drm_device *dev)
        if (INTEL_INFO(dev)->gen == 5)
                return 0;
 
-       if (IS_HASWELL(dev)) {
-               DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+       if (IS_HASWELL(dev))
                return INTEL_RC6_ENABLE;
-       }
 
        /* snb/ivb have more than one rc6 state. */
-       if (INTEL_INFO(dev)->gen == 6) {
-               DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+       if (INTEL_INFO(dev)->gen == 6)
                return INTEL_RC6_ENABLE;
-       }
 
-       DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
        return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 }
 
@@ -3712,10 +3822,7 @@ static void gen6_enable_rps(struct drm_device *dev)
                        rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
        }
 
-       DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
-                       (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
-                       (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
-                       (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+       intel_print_rc6_info(dev, rc6_mask);
 
        I915_WRITE(GEN6_RC_CONTROL,
                   rc6_mask |
@@ -3788,7 +3895,7 @@ void gen6_update_ring_freq(struct drm_device *dev)
        /* Convert from kHz to MHz */
        max_ia_freq /= 1000;
 
-       min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK) & 0xf;
+       min_ring_freq = I915_READ(DCLK) & 0xf;
        /* convert DDR frequency from units of 266.6MHz to bandwidth */
        min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
@@ -3951,22 +4058,12 @@ static void valleyview_enable_rps(struct drm_device *dev)
                                      VLV_RENDER_RC6_COUNT_EN));
        if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
                rc6_mode = GEN7_RC_CTL_TO_MODE;
+
+       intel_print_rc6_info(dev, rc6_mode);
+
        I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
        val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-       switch ((val >> 6) & 3) {
-       case 0:
-       case 1:
-               dev_priv->mem_freq = 800;
-               break;
-       case 2:
-               dev_priv->mem_freq = 1066;
-               break;
-       case 3:
-               dev_priv->mem_freq = 1333;
-               break;
-       }
-       DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
        DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
        DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
@@ -4122,6 +4219,8 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 
        I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
        I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+
+       intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
 }
 
 static unsigned long intel_pxfreq(u32 vidfreq)
@@ -4896,7 +4995,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
         * gating for the panel power sequencer or it will fail to
         * start up when no ports are active.
         */
-       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+                  PCH_DPLUNIT_CLOCK_GATE_DISABLE |
+                  PCH_CPUNIT_CLOCK_GATE_DISABLE);
        I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
                   DPLS_EDP_PPS_FIX_DIS);
        /* The below fixes the weird display corruption, a few pixels shifted
@@ -5090,6 +5191,11 @@ static void haswell_init_clock_gating(struct drm_device *dev)
        I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
                        GEN7_WA_L3_CHICKEN_MODE);
 
+       /* L3 caching of data atomics doesn't work -- disable it. */
+       I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
+       I915_WRITE(HSW_ROW_CHICKEN3,
+                  _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
+
        /* This is required by WaCatErrorRejectionIssue:hsw */
        I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
                        I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@@ -5206,6 +5312,31 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 static void valleyview_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 val;
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+       val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+       switch ((val >> 6) & 3) {
+       case 0:
+               dev_priv->mem_freq = 800;
+               break;
+       case 1:
+               dev_priv->mem_freq = 1066;
+               break;
+       case 2:
+               dev_priv->mem_freq = 1333;
+               break;
+       case 3:
+               /*
+                * Probably a BIOS/Punit bug, or a new platform we don't
+                * support yet.
+                */
+               WARN(1, "invalid DDR freq detected, assuming 800MHz\n");
+               dev_priv->mem_freq = 800;
+               break;
+       }
+       DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
        I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
@@ -5385,6 +5516,23 @@ void intel_suspend_hw(struct drm_device *dev)
                lpt_suspend_hw(dev);
 }
 
+static bool is_always_on_power_domain(struct drm_device *dev,
+                                     enum intel_display_power_domain domain)
+{
+       unsigned long always_on_domains;
+
+       BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
+
+       if (IS_HASWELL(dev)) {
+               always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
+       } else {
+               WARN_ON(1);
+               return true;
+       }
+
+       return BIT(domain) & always_on_domains;
+}
+
 /**
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -5398,24 +5546,11 @@ bool intel_display_power_enabled(struct drm_device *dev,
        if (!HAS_POWER_WELL(dev))
                return true;
 
-       switch (domain) {
-       case POWER_DOMAIN_PIPE_A:
-       case POWER_DOMAIN_TRANSCODER_EDP:
+       if (is_always_on_power_domain(dev, domain))
                return true;
-       case POWER_DOMAIN_VGA:
-       case POWER_DOMAIN_PIPE_B:
-       case POWER_DOMAIN_PIPE_C:
-       case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
-       case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
-       case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
-       case POWER_DOMAIN_TRANSCODER_A:
-       case POWER_DOMAIN_TRANSCODER_B:
-       case POWER_DOMAIN_TRANSCODER_C:
-               return I915_READ(HSW_PWR_WELL_DRIVER) ==
+
+       return I915_READ(HSW_PWR_WELL_DRIVER) ==
                     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
-       default:
-               BUG();
-       }
 }
 
 static void __intel_set_power_well(struct drm_device *dev, bool enable)
@@ -5465,169 +5600,130 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
        }
 }
 
-static void __intel_power_well_get(struct i915_power_well *power_well)
+static void __intel_power_well_get(struct drm_device *dev,
+                                  struct i915_power_well *power_well)
 {
        if (!power_well->count++)
-               __intel_set_power_well(power_well->device, true);
+               __intel_set_power_well(dev, true);
 }
 
-static void __intel_power_well_put(struct i915_power_well *power_well)
+static void __intel_power_well_put(struct drm_device *dev,
+                                  struct i915_power_well *power_well)
 {
        WARN_ON(!power_well->count);
-       if (!--power_well->count)
-               __intel_set_power_well(power_well->device, false);
+       if (!--power_well->count && i915_disable_power_well)
+               __intel_set_power_well(dev, false);
 }
 
 void intel_display_power_get(struct drm_device *dev,
                             enum intel_display_power_domain domain)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_power_well *power_well = &dev_priv->power_well;
+       struct i915_power_domains *power_domains;
 
        if (!HAS_POWER_WELL(dev))
                return;
 
-       switch (domain) {
-       case POWER_DOMAIN_PIPE_A:
-       case POWER_DOMAIN_TRANSCODER_EDP:
+       if (is_always_on_power_domain(dev, domain))
                return;
-       case POWER_DOMAIN_VGA:
-       case POWER_DOMAIN_PIPE_B:
-       case POWER_DOMAIN_PIPE_C:
-       case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
-       case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
-       case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
-       case POWER_DOMAIN_TRANSCODER_A:
-       case POWER_DOMAIN_TRANSCODER_B:
-       case POWER_DOMAIN_TRANSCODER_C:
-               spin_lock_irq(&power_well->lock);
-               __intel_power_well_get(power_well);
-               spin_unlock_irq(&power_well->lock);
-               return;
-       default:
-               BUG();
-       }
+
+       power_domains = &dev_priv->power_domains;
+
+       mutex_lock(&power_domains->lock);
+       __intel_power_well_get(dev, &power_domains->power_wells[0]);
+       mutex_unlock(&power_domains->lock);
 }
 
 void intel_display_power_put(struct drm_device *dev,
                             enum intel_display_power_domain domain)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_power_well *power_well = &dev_priv->power_well;
+       struct i915_power_domains *power_domains;
 
        if (!HAS_POWER_WELL(dev))
                return;
 
-       switch (domain) {
-       case POWER_DOMAIN_PIPE_A:
-       case POWER_DOMAIN_TRANSCODER_EDP:
+       if (is_always_on_power_domain(dev, domain))
                return;
-       case POWER_DOMAIN_VGA:
-       case POWER_DOMAIN_PIPE_B:
-       case POWER_DOMAIN_PIPE_C:
-       case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
-       case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
-       case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
-       case POWER_DOMAIN_TRANSCODER_A:
-       case POWER_DOMAIN_TRANSCODER_B:
-       case POWER_DOMAIN_TRANSCODER_C:
-               spin_lock_irq(&power_well->lock);
-               __intel_power_well_put(power_well);
-               spin_unlock_irq(&power_well->lock);
-               return;
-       default:
-               BUG();
-       }
+
+       power_domains = &dev_priv->power_domains;
+
+       mutex_lock(&power_domains->lock);
+       __intel_power_well_put(dev, &power_domains->power_wells[0]);
+       mutex_unlock(&power_domains->lock);
 }
 
/*
 * Module-scope handle to the power domains state, exported indirectly to
 * the external display audio driver via i915_{request,release}_power_well.
 * NULL until intel_power_domains_init() runs and after removal.
 */
static struct i915_power_domains *hsw_pwr;

/* Display audio driver power well request */
void i915_request_power_well(void)
{
	struct drm_i915_private *dev_priv;

	/* Driver not loaded / power domains torn down: nothing to grab. */
	if (WARN_ON(!hsw_pwr))
		return;

	/* Recover dev_priv from the embedded power_domains pointer. */
	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);

	mutex_lock(&hsw_pwr->lock);
	__intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
	mutex_unlock(&hsw_pwr->lock);
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
 
/* Display audio driver power well release */
void i915_release_power_well(void)
{
	struct drm_i915_private *dev_priv;

	/* Driver not loaded / power domains torn down: nothing to release. */
	if (WARN_ON(!hsw_pwr))
		return;

	/* Recover dev_priv from the embedded power_domains pointer. */
	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);

	mutex_lock(&hsw_pwr->lock);
	__intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
	mutex_unlock(&hsw_pwr->lock);
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
 
-int i915_init_power_well(struct drm_device *dev)
+int intel_power_domains_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *power_well;
 
-       hsw_pwr = &dev_priv->power_well;
+       mutex_init(&power_domains->lock);
+       hsw_pwr = power_domains;
 
-       hsw_pwr->device = dev;
-       spin_lock_init(&hsw_pwr->lock);
-       hsw_pwr->count = 0;
+       power_well = &power_domains->power_wells[0];
+       power_well->count = 0;
 
        return 0;
 }
 
/*
 * Tear down the power domains handle so the external audio driver hooks
 * (i915_request/release_power_well) become no-ops after driver unload.
 */
void intel_power_domains_remove(struct drm_device *dev)
{
	hsw_pwr = NULL;
}
 
-void intel_set_power_well(struct drm_device *dev, bool enable)
+static void intel_power_domains_resume(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_power_well *power_well = &dev_priv->power_well;
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *power_well;
 
        if (!HAS_POWER_WELL(dev))
                return;
 
-       if (!i915_disable_power_well && !enable)
-               return;
-
-       spin_lock_irq(&power_well->lock);
+       mutex_lock(&power_domains->lock);
 
-       /*
-        * This function will only ever contribute one
-        * to the power well reference count. i915_request
-        * is what tracks whether we have or have not
-        * added the one to the reference count.
-        */
-       if (power_well->i915_request == enable)
-               goto out;
-
-       power_well->i915_request = enable;
-
-       if (enable)
-               __intel_power_well_get(power_well);
-       else
-               __intel_power_well_put(power_well);
-
- out:
-       spin_unlock_irq(&power_well->lock);
-}
-
-static void intel_resume_power_well(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_power_well *power_well = &dev_priv->power_well;
-
-       if (!HAS_POWER_WELL(dev))
-               return;
-
-       spin_lock_irq(&power_well->lock);
+       power_well = &power_domains->power_wells[0];
        __intel_set_power_well(dev, power_well->count > 0);
-       spin_unlock_irq(&power_well->lock);
+
+       mutex_unlock(&power_domains->lock);
 }
 
 /*
@@ -5636,7 +5732,7 @@ static void intel_resume_power_well(struct drm_device *dev)
  * to be enabled, and it will only be disabled if none of the registers is
  * requesting it to be enabled.
  */
-void intel_init_power_well(struct drm_device *dev)
+void intel_power_domains_init_hw(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -5644,8 +5740,8 @@ void intel_init_power_well(struct drm_device *dev)
                return;
 
        /* For now, we need the power well to be always enabled. */
-       intel_set_power_well(dev, true);
-       intel_resume_power_well(dev);
+       intel_display_set_init_power(dev, true);
+       intel_power_domains_resume(dev);
 
        /* We're taking over the BIOS, so clear any requests made by it since
         * the driver is in charge now. */
This page took 0.039261 seconds and 5 git commands to generate.