drm/i915: Fix system resume if PCI device remained enabled
[deliverable/linux.git] / drivers / gpu / drm / i915 / i915_drv.c
index 20e82008b8b6c683b9189d25699dc33a054b874a..735df5595b34a3c96473071e5c9fd5b6bc2bc5d5 100644 (file)
@@ -66,6 +66,11 @@ static struct drm_driver driver;
 #define IVB_CURSOR_OFFSETS \
        .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
 
+#define BDW_COLORS \
+       .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
+#define CHV_COLORS \
+       .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+
 static const struct intel_device_info intel_i830_info = {
        .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
@@ -288,24 +293,28 @@ static const struct intel_device_info intel_haswell_m_info = {
        .is_mobile = 1,
 };
 
+#define BDW_FEATURES \
+       HSW_FEATURES, \
+       BDW_COLORS
+
 static const struct intel_device_info intel_broadwell_d_info = {
-       HSW_FEATURES,
+       BDW_FEATURES,
        .gen = 8,
 };
 
 static const struct intel_device_info intel_broadwell_m_info = {
-       HSW_FEATURES,
+       BDW_FEATURES,
        .gen = 8, .is_mobile = 1,
 };
 
 static const struct intel_device_info intel_broadwell_gt3d_info = {
-       HSW_FEATURES,
+       BDW_FEATURES,
        .gen = 8,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
 static const struct intel_device_info intel_broadwell_gt3m_info = {
-       HSW_FEATURES,
+       BDW_FEATURES,
        .gen = 8, .is_mobile = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
@@ -318,16 +327,17 @@ static const struct intel_device_info intel_cherryview_info = {
        .display_mmio_offset = VLV_DISPLAY_BASE,
        GEN_CHV_PIPEOFFSETS,
        CURSOR_OFFSETS,
+       CHV_COLORS,
 };
 
 static const struct intel_device_info intel_skylake_info = {
-       HSW_FEATURES,
+       BDW_FEATURES,
        .is_skylake = 1,
        .gen = 9,
 };
 
 static const struct intel_device_info intel_skylake_gt3_info = {
-       HSW_FEATURES,
+       BDW_FEATURES,
        .is_skylake = 1,
        .gen = 9,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -345,18 +355,17 @@ static const struct intel_device_info intel_broxton_info = {
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
+       BDW_COLORS,
 };
 
 static const struct intel_device_info intel_kabylake_info = {
-       HSW_FEATURES,
-       .is_preliminary = 1,
+       BDW_FEATURES,
        .is_kabylake = 1,
        .gen = 9,
 };
 
 static const struct intel_device_info intel_kabylake_gt3_info = {
-       HSW_FEATURES,
-       .is_preliminary = 1,
+       BDW_FEATURES,
        .is_kabylake = 1,
        .gen = 9,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -504,6 +513,7 @@ void intel_detect_pch(struct drm_device *dev)
                                WARN_ON(!IS_SKYLAKE(dev) &&
                                        !IS_KABYLAKE(dev));
                        } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
+                                  (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
                                   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
                                    pch->subsystem_vendor == 0x1af4 &&
                                    pch->subsystem_device == 0x1100)) {
@@ -647,7 +657,8 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 
        disable_rpm_wakeref_asserts(dev_priv);
 
-       fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
+       fw_csr = !IS_BROXTON(dev_priv) &&
+               suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
        /*
         * In case of firmware assisted context save/restore don't manually
         * deinit the power domains. This also means the CSR/DMC firmware will
@@ -758,10 +769,10 @@ static int i915_drm_resume(struct drm_device *dev)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock_irq(&dev_priv->irq_lock);
 
-       intel_display_resume(dev);
-
        intel_dp_mst_resume(dev);
 
+       intel_display_resume(dev);
+
        /*
         * ... but also need to make sure that hotplug processing
         * doesn't cause havoc. Like in the driver load code we don't
@@ -792,7 +803,7 @@ static int i915_drm_resume(struct drm_device *dev)
 static int i915_drm_resume_early(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret = 0;
+       int ret;
 
        /*
         * We have a resume ordering issue with the snd-hda driver also
@@ -803,6 +814,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
+
+       /*
+        * Note that we need to set the power state explicitly, since we
+        * powered off the device during freeze and the PCI core won't power
+        * it back up for us during thaw. Powering off the device during
+        * freeze is not a hard requirement though, and during the
+        * suspend/resume phases the PCI core makes sure we get here with the
+        * device powered on. So in case we change our freeze logic and keep
+        * the device powered we can also remove the following set power state
+        * call.
+        */
+       ret = pci_set_power_state(dev->pdev, PCI_D0);
+       if (ret) {
+               DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
+               goto out;
+       }
+
+       /*
+        * Note that pci_enable_device() first enables any parent bridge
+        * device and only then sets the power state for this device. The
+        * bridge enabling is a nop though, since bridge devices are resumed
+        * first. The order of enabling power and enabling the device is
+        * imposed by the PCI core as described above, so here we preserve the
+        * same order for the freeze/thaw phases.
+        *
+        * TODO: eventually we should remove pci_disable_device() /
+        * pci_enable_device() from suspend/resume. Due to how they
+        * depend on the device enable refcount we can't anyway depend on them
+        * disabling/enabling the device.
+        */
        if (pci_enable_device(dev->pdev)) {
                ret = -EIO;
                goto out;
@@ -827,14 +868,15 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
        intel_uncore_sanitize(dev);
 
-       if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
+       if (IS_BROXTON(dev_priv) ||
+           !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
                intel_power_domains_init_hw(dev_priv, true);
 
+       enable_rpm_wakeref_asserts(dev_priv);
+
 out:
        dev_priv->suspended_to_idle = false;
 
-       enable_rpm_wakeref_asserts(dev_priv);
-
        return ret;
 }
 
@@ -870,23 +912,32 @@ int i915_resume_switcheroo(struct drm_device *dev)
 int i915_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       bool simulated;
+       struct i915_gpu_error *error = &dev_priv->gpu_error;
+       unsigned reset_counter;
        int ret;
 
        intel_reset_gt_powersave(dev);
 
        mutex_lock(&dev->struct_mutex);
 
-       i915_gem_reset(dev);
+       /* Clear any previous failed attempts at recovery. Time to try again. */
+       atomic_andnot(I915_WEDGED, &error->reset_counter);
 
-       simulated = dev_priv->gpu_error.stop_rings != 0;
+       /* Clear the reset-in-progress flag and increment the reset epoch. */
+       reset_counter = atomic_inc_return(&error->reset_counter);
+       if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
+               ret = -EIO;
+               goto error;
+       }
+
+       i915_gem_reset(dev);
 
-       ret = intel_gpu_reset(dev);
+       ret = intel_gpu_reset(dev, ALL_ENGINES);
 
        /* Also reset the gpu hangman. */
-       if (simulated) {
+       if (error->stop_rings != 0) {
                DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
-               dev_priv->gpu_error.stop_rings = 0;
+               error->stop_rings = 0;
                if (ret == -ENODEV) {
                        DRM_INFO("Reset not implemented, but ignoring "
                                 "error for simulated gpu hangs\n");
@@ -898,9 +949,11 @@ int i915_reset(struct drm_device *dev)
                pr_notice("drm/i915: Resetting chip after gpu hang\n");
 
        if (ret) {
-               DRM_ERROR("Failed to reset chip: %i\n", ret);
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
+               if (ret != -ENODEV)
+                       DRM_ERROR("Failed to reset chip: %i\n", ret);
+               else
+                       DRM_DEBUG_DRIVER("GPU reset disabled\n");
+               goto error;
        }
 
        intel_overlay_reset(dev_priv);
@@ -919,20 +972,14 @@ int i915_reset(struct drm_device *dev)
         * was running at the time of the reset (i.e. we weren't VT
         * switched away).
         */
-
-       /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
-       dev_priv->gpu_error.reload_in_reset = true;
-
        ret = i915_gem_init_hw(dev);
-
-       dev_priv->gpu_error.reload_in_reset = false;
-
-       mutex_unlock(&dev->struct_mutex);
        if (ret) {
                DRM_ERROR("Failed hw init on reset %d\n", ret);
-               return ret;
+               goto error;
        }
 
+       mutex_unlock(&dev->struct_mutex);
+
        /*
         * rps/rc6 re-init is necessary to restore state lost after the
         * reset and the re-install of gt irqs. Skip for ironlake per
@@ -943,6 +990,11 @@ int i915_reset(struct drm_device *dev)
                intel_enable_gt_powersave(dev);
 
        return 0;
+
+error:
+       atomic_or(I915_WEDGED, &error->reset_counter);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
 }
 
 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1058,12 +1110,7 @@ static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
 
 static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
-
-       /* TODO: when DC5 support is added disable DC5 here. */
-
-       broxton_ddi_phy_uninit(dev);
-       broxton_uninit_cdclk(dev);
+       bxt_display_core_uninit(dev_priv);
        bxt_enable_dc9(dev_priv);
 
        return 0;
@@ -1071,18 +1118,8 @@ static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
 
 static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
-
-       /* TODO: when CSR FW support is added make sure the FW is loaded */
-
        bxt_disable_dc9(dev_priv);
-
-       /*
-        * TODO: when DC5 support is added enable DC5 here if the CSR FW
-        * is available.
-        */
-       broxton_init_cdclk(dev);
-       broxton_ddi_phy_init(dev);
+       bxt_display_core_init(dev_priv, true);
 
        return 0;
 }
@@ -1390,7 +1427,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
        if (err)
                goto err2;
 
-       if (!IS_CHERRYVIEW(dev_priv->dev))
+       if (!IS_CHERRYVIEW(dev_priv))
                vlv_save_gunit_s0ix_state(dev_priv);
 
        err = vlv_force_gfx_clock(dev_priv, false);
@@ -1422,7 +1459,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
         */
        ret = vlv_force_gfx_clock(dev_priv, true);
 
-       if (!IS_CHERRYVIEW(dev_priv->dev))
+       if (!IS_CHERRYVIEW(dev_priv))
                vlv_restore_gunit_s0ix_state(dev_priv);
 
        err = vlv_allow_gt_wake(dev_priv, true);
This page took 0.027471 seconds and 5 git commands to generate.