/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"

static struct drm_driver driver;

static unsigned int i915_load_fail_count;

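/*
 * Load-failure injection for exercising the driver's error-unwind paths:
 * the i915.inject_load_failure module parameter selects the Nth load-time
 * checkpoint at which a synthetic failure is reported (a sketch of the
 * intended usage would be booting with i915.inject_load_failure=3 to fail
 * at the third checkpoint; purely a testing aid).
 */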
bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915.inject_load_failure, func, line);
		return true;
	}

	return false;
}

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *kdev = dev_priv->drm.dev;
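	/*
	 * printk levels are the two-character strings "\001<digit>"
	 * (KERN_ERR is "\0013", KERN_DEBUG is "\0017"), so comparing the
	 * digit at level[1] is enough to rank severity.
	 */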
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
		   __builtin_return_address(0), &vaf);

	if (is_error && !shown_bug_once) {
		dev_notice(kdev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}

	va_end(args);
}

static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
	return i915.inject_load_failure &&
	       i915_load_fail_count == i915.inject_load_failure;
}

#define i915_load_error(dev_priv, fmt, ...)				     \
	__i915_printk(dev_priv,						     \
		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)


static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}

static void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_KBP;
				DRM_DEBUG_KMS("Found KabyPoint PCH\n");
				WARN_ON(!IS_KABYLAKE(dev));
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
				    pch->subsystem_vendor ==
					    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
				    pch->subsystem_device ==
					    PCI_SUBDEVICE_ID_QEMU)) {
				dev_priv->pch_type = intel_virt_detect_pch(dev);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

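/*
 * A sketch of the userspace side (not part of this file): parameters are
 * queried through the DRM_IOCTL_I915_GETPARAM ioctl, e.g.
 *
 *	int id;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID, .value = &id };
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */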
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = pdev->revision;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_engine_initialized(&dev_priv->engine[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_engine_initialized(&dev_priv->engine[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_engine_initialized(&dev_priv->engine[VECS]);
		break;
	case I915_PARAM_HAS_BSD2:
		value = intel_engine_initialized(&dev_priv->engine[VCS2]);
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_GEN(dev_priv) >= 4;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev_priv);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev_priv);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev_priv);
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915.semaphores;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = INTEL_INFO(dev_priv)->subslice_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev_priv)->eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev_priv);
		break;
	case I915_PARAM_HAS_POOLED_EU:
		value = HAS_POOLED_EU(dev_priv);
		break;
	case I915_PARAM_MIN_EU_IN_POOL:
		value = INTEL_INFO(dev_priv)->min_eu_in_pool;
		break;
	case I915_PARAM_MMAP_GTT_VERSION:
		/* Though we've started our numbering from 1, and so class all
		 * earlier versions as 0, in effect their value is undefined as
		 * the ioctl will report EINVAL for the unknown param!
		 */
		value = i915_gem_mmap_gtt_version();
		break;
	case I915_PARAM_MMAP_VERSION:
		/* Remember to bump this if the version changes! */
	case I915_PARAM_HAS_GEM:
	case I915_PARAM_HAS_PAGEFLIPPING:
	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
	case I915_PARAM_HAS_RELAXED_FENCING:
	case I915_PARAM_HAS_COHERENT_RINGS:
	case I915_PARAM_HAS_RELAXED_DELTA:
	case I915_PARAM_HAS_GEN7_SOL_RESET:
	case I915_PARAM_HAS_WAIT_TIMEOUT:
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
	case I915_PARAM_HAS_PINNED_BATCHES:
	case I915_PARAM_HAS_EXEC_NO_RELOC:
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
	case I915_PARAM_HAS_EXEC_SOFTPIN:
		/* For the time being all of these are always true;
		 * if some supported hardware does not have one of these
		 * features this value needs to be provided from
		 * INTEL_INFO(), a feature macro, or similar.
		 */
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

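/*
 * MCHBAR is the memory controller hub's register window; the driver's
 * power-management code pokes it for things like self-refresh and turbo
 * tuning. If the BIOS left it disabled, the helpers below allocate the
 * space and enable it, remembering to disable it again on teardown.
 */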
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible, noting whether we need to disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

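/*
 * Callback for the VGA arbiter: the returned flags tell it which legacy
 * VGA resources we decode in the requested state (with decode enabled we
 * claim the legacy I/O and memory ranges as well as the normal ones).
 */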
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

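/*
 * vga_switcheroo client hooks: on hybrid-graphics laptops the switcheroo
 * framework calls back here to power the GPU up or down when the active
 * GPU changes, and to ask whether switching away is currently safe.
 */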
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

static void i915_gem_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Neither the BIOS, ourselves or any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload. Afterwards we then clean up the
	 * GEM state tracking, flushing off the requests and leaving the
	 * system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */
	if (HAS_HW_CONTEXTS(dev)) {
		int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
		WARN_ON(reset && reset != -ENODEV);
	}

	mutex_lock(&dev->struct_mutex);
	i915_gem_reset(dev);
	i915_gem_cleanup_engines(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);

	WARN_ON(!list_empty(&to_i915(dev)->context_list));
}

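/*
 * Bring up everything KMS needs: VBT parsing, VGA arbitration,
 * vga_switcheroo, power domains, CSR/DMC firmware, interrupts, gmbus,
 * modesetting, GEM and finally fbdev. The cleanup labels at the bottom
 * unwind these steps in exactly the reverse order on failure.
 */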
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	ret = intel_bios_init(dev_priv);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	intel_guc_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	i915_gem_fini(dev);
cleanup_irq:
	intel_guc_fini(dev);
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini(dev_priv);
	vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
	vga_client_register(pdev, NULL, NULL, NULL);
out:
	return ret;
}

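/*
 * Generic firmware framebuffer drivers (e.g. efifb or vesafb) may still
 * own the GGTT aperture at this point; evict them before i915 claims the
 * range, otherwise both drivers would scribble over the same memory.
 */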
#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->mappable_base;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv,
				  const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(dev_priv);
	memcpy(device_info, match_info, sizeof(*device_info));
	device_info->device_id = dev_priv->drm.pdev->device;

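	/*
	 * gen_mask feeds the IS_GEN() checks: one bit per generation, so a
	 * generation test compiles down to a single mask-and-test.
	 */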
	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
	device_info->gen_mask = BIT(device_info->gen - 1);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);

	i915_memcpy_init_early(dev_priv);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = intel_gvt_init(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(&dev_priv->drm);

	intel_pm_setup(&dev_priv->drm);
	intel_init_dpio(dev_priv);
	intel_power_domains_init(dev_priv);
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	i915_gem_load_init(&dev_priv->drm);

	intel_display_crc_init(dev_priv);

	intel_device_info_dump(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev_priv))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	return 0;

err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_load_cleanup(&dev_priv->drm);
	i915_workqueues_cleanup(dev_priv);
}

static int i915_mmio_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (INTEL_INFO(dev)->gen < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);

	return 0;
}

static void i915_mmio_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_teardown_mchbar(dev);
	pci_iounmap(pdev, dev_priv->regs);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev))
		return -EIO;

	ret = i915_mmio_setup(dev);
	if (ret < 0)
		goto put_bridge;

	intel_uncore_init(dev_priv);

	return 0;

put_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	i915.enable_execlists =
		intel_sanitize_enable_execlists(dev_priv,
						i915.enable_execlists);

	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities. We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915.enable_ppgtt =
		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

	i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	intel_sanitize_options(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		return ret;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_ggtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_ggtt;
	}

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		return ret;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto out_ggtt;
	}

	pci_set_master(pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev_priv);

	intel_opregion_setup(dev_priv);

	i915_gem_load_init_fences(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev)) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	return 0;

out_ggtt:
	i915_ggtt_cleanup_hw(dev_priv);

	return ret;
}

/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
	i915_ggtt_cleanup_hw(dev_priv);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_shrinker_init(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_setup_sysfs(dev_priv);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (INTEL_INFO(dev_priv)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	if (IS_GEN5(dev_priv))
		intel_gpu_ips_init(dev_priv);

	i915_audio_component_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	i915_audio_component_cleanup(dev_priv);

	intel_gpu_ips_teardown();
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	i915_debugfs_unregister(dev_priv);
	drm_dev_unregister(&dev_priv->drm);

	i915_gem_shrinker_cleanup(dev_priv);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (i915.nuclear_pageflip)
		driver.driver_features |= DRIVER_ATOMIC;

	ret = -ENOMEM;
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv)
		ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
	if (ret) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "[" DRM_NAME ":%s] allocation failed\n", __func__);
		kfree(dev_priv);
		return ret;
	}

	dev_priv->drm.pdev = pdev;
	dev_priv->drm.dev_private = dev_priv;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_free_priv;

	pci_set_drvdata(pdev, &dev_priv->drm);

	ret = i915_driver_init_early(dev_priv, ent);
	if (ret < 0)
		goto out_pci_disable;

	intel_runtime_pm_get(dev_priv);

	ret = i915_driver_init_mmio(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_init_hw(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	/*
	 * TODO: move the vblank init and parts of modeset init steps into one
	 * of the i915_driver_init_/i915_driver_register functions according
	 * to the role/effect of the given init step.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes) {
		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
			goto out_cleanup_hw;
	}

	ret = i915_load_modeset_init(&dev_priv->drm);
	if (ret < 0)
		goto out_cleanup_vblank;

	i915_driver_register(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	/* Everything is in place, we can now relax! */
	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver.name, driver.major, driver.minor, driver.patchlevel,
		 driver.date, pci_name(pdev), dev_priv->drm.primary->index);

	intel_runtime_pm_put(dev_priv);

	return 0;

out_cleanup_vblank:
	drm_vblank_cleanup(&dev_priv->drm);
out_cleanup_hw:
	i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
	i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
	intel_runtime_pm_put(dev_priv);
	i915_driver_cleanup_early(dev_priv);
out_pci_disable:
	pci_disable_device(pdev);
out_free_priv:
	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
	drm_dev_unref(&dev_priv->drm);
	return ret;
}

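/*
 * Unload unwinds i915_driver_load() step by step: idle the GPU, drop the
 * userspace-facing registrations, tear down modesetting, then release HW,
 * MMIO and early state in the reverse order of initialization.
 */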
void i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_fbdev_fini(dev);

	if (i915_gem_suspend(dev))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");

	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	i915_driver_unregister(dev_priv);

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

	vga_switcheroo_unregister_client(pdev);
	vga_client_register(pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	/* Flush any outstanding unpin_work. */
	drain_workqueue(dev_priv->wq);

	intel_guc_fini(dev);
	i915_gem_fini(dev);
	intel_fbc_cleanup_cfb(dev_priv);

	intel_power_domains_fini(dev_priv);

	i915_driver_cleanup_hw(dev_priv);
	i915_driver_cleanup_mmio(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	i915_driver_cleanup_early(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

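/*
 * True when the target sleep state is shallower than ACPI S3 (i.e.
 * suspend-to-idle); in that case the DMC firmware can keep display
 * context alive, so the full power-domain teardown is skipped below.
 */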
bc87229f ID |
1386 | static bool suspend_to_idle(struct drm_i915_private *dev_priv) |
1387 | { | |
1388 | #if IS_ENABLED(CONFIG_ACPI_SLEEP) | |
1389 | if (acpi_target_system_state() < ACPI_STATE_S3) | |
1390 | return true; | |
1391 | #endif | |
1392 | return false; | |
1393 | } | |
ebc32824 | 1394 | |
5e365c39 | 1395 | static int i915_drm_suspend(struct drm_device *dev) |
ba8bbcf6 | 1396 | { |
fac5e23e | 1397 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1398 | struct pci_dev *pdev = dev_priv->drm.pdev; |
e5747e3a | 1399 | pci_power_t opregion_target_state; |
d5818938 | 1400 | int error; |
61caf87c | 1401 | |
b8efb17b ZR |
1402 | /* ignore lid events during suspend */ |
1403 | mutex_lock(&dev_priv->modeset_restore_lock); | |
1404 | dev_priv->modeset_restore = MODESET_SUSPENDED; | |
1405 | mutex_unlock(&dev_priv->modeset_restore_lock); | |
1406 | ||
1f814dac ID |
1407 | disable_rpm_wakeref_asserts(dev_priv); |
1408 | ||
c67a470b PZ |
1409 | /* We do a lot of poking in a lot of registers, make sure they work |
1410 | * properly. */ | |
da7e29bd | 1411 | intel_display_set_init_power(dev_priv, true); |
cb10799c | 1412 | |
5bcf719b DA |
1413 | drm_kms_helper_poll_disable(dev); |
1414 | ||
52a05c30 | 1415 | pci_save_state(pdev); |
ba8bbcf6 | 1416 | |
d5818938 DV |
1417 | error = i915_gem_suspend(dev); |
1418 | if (error) { | |
52a05c30 | 1419 | dev_err(&pdev->dev, |
d5818938 | 1420 | "GEM idle failed, resume might fail\n"); |
1f814dac | 1421 | goto out; |
d5818938 | 1422 | } |
db1b76ca | 1423 | |
a1c41994 AD |
1424 | intel_guc_suspend(dev); |
1425 | ||
6b72d486 | 1426 | intel_display_suspend(dev); |
2eb5252e | 1427 | |
d5818938 | 1428 | intel_dp_mst_suspend(dev); |
7d708ee4 | 1429 | |
d5818938 DV |
1430 | intel_runtime_pm_disable_interrupts(dev_priv); |
1431 | intel_hpd_cancel_work(dev_priv); | |
09b64267 | 1432 | |
d5818938 | 1433 | intel_suspend_encoders(dev_priv); |
0e32b39c | 1434 | |
d5818938 | 1435 | intel_suspend_hw(dev); |
5669fcac | 1436 | |
828c7908 BW |
1437 | i915_gem_suspend_gtt_mappings(dev); |
1438 | ||
9e06dd39 JB |
1439 | i915_save_state(dev); |
1440 | ||
bc87229f | 1441 | opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; |
6f9f4b7a | 1442 | intel_opregion_notify_adapter(dev_priv, opregion_target_state); |
e5747e3a | 1443 | |
dc97997a | 1444 | intel_uncore_forcewake_reset(dev_priv, false); |
03d92e47 | 1445 | intel_opregion_unregister(dev_priv); |
8ee1c3db | 1446 | |
82e3b8c1 | 1447 | intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); |
3fa016a0 | 1448 | |
62d5d69b MK |
1449 | dev_priv->suspend_count++; |
1450 | ||
85e90679 KCA |
1451 | intel_display_set_init_power(dev_priv, false); |
1452 | ||
f74ed08d | 1453 | intel_csr_ucode_suspend(dev_priv); |
f514c2d8 | 1454 | |
1f814dac ID |
1455 | out: |
1456 | enable_rpm_wakeref_asserts(dev_priv); | |
1457 | ||
1458 | return error; | |
84b79f8d RW |
1459 | } |
1460 | ||
c49d13ee | 1461 | static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) |
c3c09c95 | 1462 | { |
c49d13ee | 1463 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1464 | struct pci_dev *pdev = dev_priv->drm.pdev; |
bc87229f | 1465 | bool fw_csr; |
c3c09c95 ID |
1466 | int ret; |
1467 | ||
1f814dac ID |
1468 | disable_rpm_wakeref_asserts(dev_priv); |
1469 | ||
a7c8125f ID |
1470 | fw_csr = !IS_BROXTON(dev_priv) && |
1471 | suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; | |
bc87229f ID |
1472 | /* |
1473 | * In case of firmware assisted context save/restore don't manually | |
1474 | * deinit the power domains. This also means the CSR/DMC firmware will | |
1475 | * stay active, it will power down any HW resources as required and | |
1476 | * also enable deeper system power states that would be blocked if the | |
1477 | * firmware was inactive. | |
1478 | */ | |
1479 | if (!fw_csr) | |
1480 | intel_power_domains_suspend(dev_priv); | |
73dfc227 | 1481 | |
507e126e | 1482 | ret = 0; |
b8aea3d1 | 1483 | if (IS_BROXTON(dev_priv)) |
507e126e | 1484 | bxt_enable_dc9(dev_priv); |
b8aea3d1 | 1485 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
507e126e ID |
1486 | hsw_enable_pc8(dev_priv); |
1487 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | |
1488 | ret = vlv_suspend_complete(dev_priv); | |
c3c09c95 ID |
1489 | |
1490 | if (ret) { | |
1491 | DRM_ERROR("Suspend complete failed: %d\n", ret); | |
bc87229f ID |
1492 | if (!fw_csr) |
1493 | intel_power_domains_init_hw(dev_priv, true); | |
c3c09c95 | 1494 | |
1f814dac | 1495 | goto out; |
c3c09c95 ID |
1496 | } |
1497 | ||
52a05c30 | 1498 | pci_disable_device(pdev); |
ab3be73f | 1499 | /* |
54875571 | 1500 | * During hibernation on some platforms the BIOS may try to access |
ab3be73f ID |
1501 | * the device even though it's already in D3 and hang the machine. So |
1502 | * leave the device in D0 on those platforms and hope the BIOS will | |
54875571 ID |
1503 | * power down the device properly. The issue was seen on multiple old |
1504 | * GENs with different BIOS vendors, so having an explicit blacklist | |
1505 | * is impractical; apply the workaround on everything pre GEN6. The |
1506 | * platforms where the issue was seen: | |
1507 | * Lenovo Thinkpad X301, X61s, X60, T60, X41 | |
1508 | * Fujitsu FSC S7110 | |
1509 | * Acer Aspire 1830T | |
ab3be73f | 1510 | */ |
54875571 | 1511 | if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6)) |
52a05c30 | 1512 | pci_set_power_state(pdev, PCI_D3hot); |
c3c09c95 | 1513 | |
bc87229f ID |
1514 | dev_priv->suspended_to_idle = suspend_to_idle(dev_priv); |
1515 | ||
1f814dac ID |
1516 | out: |
1517 | enable_rpm_wakeref_asserts(dev_priv); | |
1518 | ||
1519 | return ret; | |
c3c09c95 ID |
1520 | } |
1521 | ||
1751fcf9 | 1522 | int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) |
84b79f8d RW |
1523 | { |
1524 | int error; | |
1525 | ||
ded8b07d | 1526 | if (!dev) { |
84b79f8d RW |
1527 | DRM_ERROR("dev: %p\n", dev); |
1528 | DRM_ERROR("DRM not initialized, aborting suspend.\n"); | |
1529 | return -ENODEV; | |
1530 | } | |
1531 | ||
0b14cbd2 ID |
1532 | if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && |
1533 | state.event != PM_EVENT_FREEZE)) | |
1534 | return -EINVAL; | |
5bcf719b DA |
1535 | |
1536 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | |
1537 | return 0; | |
6eecba33 | 1538 | |
5e365c39 | 1539 | error = i915_drm_suspend(dev); |
84b79f8d RW |
1540 | if (error) |
1541 | return error; | |
1542 | ||
ab3be73f | 1543 | return i915_drm_suspend_late(dev, false); |
ba8bbcf6 JB |
1544 | } |
1545 | ||
5e365c39 | 1546 | static int i915_drm_resume(struct drm_device *dev) |
76c4b250 | 1547 | { |
fac5e23e | 1548 | struct drm_i915_private *dev_priv = to_i915(dev); |
ac840ae5 | 1549 | int ret; |
9d49c0ef | 1550 | |
1f814dac | 1551 | disable_rpm_wakeref_asserts(dev_priv); |
abc80abd | 1552 | intel_sanitize_gt_powersave(dev_priv); |
1f814dac | 1553 | |
97d6d7ab | 1554 | ret = i915_ggtt_enable_hw(dev_priv); |
ac840ae5 VS |
1555 | if (ret) |
1556 | DRM_ERROR("failed to re-enable GGTT\n"); | |
1557 | ||
f74ed08d ID |
1558 | intel_csr_ucode_resume(dev_priv); |
1559 | ||
5ab57c70 | 1560 | i915_gem_resume(dev); |
9d49c0ef | 1561 | |
61caf87c | 1562 | i915_restore_state(dev); |
8090ba8c | 1563 | intel_pps_unlock_regs_wa(dev_priv); |
6f9f4b7a | 1564 | intel_opregion_setup(dev_priv); |
61caf87c | 1565 | |
d5818938 DV |
1566 | intel_init_pch_refclk(dev); |
1567 | drm_mode_config_reset(dev); | |
1833b134 | 1568 | |
364aece0 PA |
1569 | /* |
1570 | * Interrupts have to be enabled before any batches are run. If not the | |
1571 | * GPU will hang. i915_gem_init_hw() will initiate batches to | |
1572 | * update/restore the context. | |
1573 | * | |
1574 | * Modeset enabling in intel_modeset_init_hw() also needs working | |
1575 | * interrupts. | |
1576 | */ | |
1577 | intel_runtime_pm_enable_interrupts(dev_priv); | |
1578 | ||
d5818938 DV |
1579 | mutex_lock(&dev->struct_mutex); |
1580 | if (i915_gem_init_hw(dev)) { | |
1581 | DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); | |
338d0eea | 1582 | atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); |
d5818938 DV |
1583 | } |
1584 | mutex_unlock(&dev->struct_mutex); | |
226485e9 | 1585 | |
a1c41994 AD |
1586 | intel_guc_resume(dev); |
1587 | ||
d5818938 | 1588 | intel_modeset_init_hw(dev); |
24576d23 | 1589 | |
d5818938 DV |
1590 | spin_lock_irq(&dev_priv->irq_lock); |
1591 | if (dev_priv->display.hpd_irq_setup) | |
91d14251 | 1592 | dev_priv->display.hpd_irq_setup(dev_priv); |
d5818938 | 1593 | spin_unlock_irq(&dev_priv->irq_lock); |
0e32b39c | 1594 | |
d5818938 | 1595 | intel_dp_mst_resume(dev); |
e7d6f7d7 | 1596 | |
a16b7658 L |
1597 | intel_display_resume(dev); |
1598 | ||
d5818938 DV |
1599 | /* |
1600 | * ... but also need to make sure that hotplug processing | |
1601 | * doesn't cause havoc. Like in the driver load code we don't | |
1602 | * bother with the tiny race here where we might lose hotplug |
1603 | * notifications. | |
1604 | */ |
1605 | intel_hpd_init(dev_priv); | |
1606 | /* Config may have changed between suspend and resume */ | |
1607 | drm_helper_hpd_irq_event(dev); | |
1daed3fb | 1608 | |
03d92e47 | 1609 | intel_opregion_register(dev_priv); |
44834a67 | 1610 | |
82e3b8c1 | 1611 | intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); |
073f34d9 | 1612 | |
b8efb17b ZR |
1613 | mutex_lock(&dev_priv->modeset_restore_lock); |
1614 | dev_priv->modeset_restore = MODESET_DONE; | |
1615 | mutex_unlock(&dev_priv->modeset_restore_lock); | |
8a187455 | 1616 | |
6f9f4b7a | 1617 | intel_opregion_notify_adapter(dev_priv, PCI_D0); |
e5747e3a | 1618 | |
54b4f68f | 1619 | intel_autoenable_gt_powersave(dev_priv); |
ee6f280e ID |
1620 | drm_kms_helper_poll_enable(dev); |
1621 | ||
1f814dac ID |
1622 | enable_rpm_wakeref_asserts(dev_priv); |
1623 | ||
074c6ada | 1624 | return 0; |
84b79f8d RW |
1625 | } |
1626 | ||
5e365c39 | 1627 | static int i915_drm_resume_early(struct drm_device *dev) |
84b79f8d | 1628 | { |
fac5e23e | 1629 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1630 | struct pci_dev *pdev = dev_priv->drm.pdev; |
44410cd0 | 1631 | int ret; |
36d61e67 | 1632 | |
76c4b250 ID |
1633 | /* |
1634 | * We have a resume ordering issue with the snd-hda driver also | |
1635 | * requiring our device to be powered up. Due to the lack of a |
1636 | * parent/child relationship we currently solve this with an early | |
1637 | * resume hook. | |
1638 | * | |
1639 | * FIXME: This should be solved with a special hdmi sink device or | |
1640 | * similar so that power domains can be employed. | |
1641 | */ | |
44410cd0 ID |
1642 | |
1643 | /* | |
1644 | * Note that we need to set the power state explicitly, since we | |
1645 | * powered off the device during freeze and the PCI core won't power | |
1646 | * it back up for us during thaw. Powering off the device during | |
1647 | * freeze is not a hard requirement though, and during the | |
1648 | * suspend/resume phases the PCI core makes sure we get here with the | |
1649 | * device powered on. So in case we change our freeze logic and keep | |
1650 | * the device powered we can also remove the following set power state | |
1651 | * call. | |
1652 | */ | |
52a05c30 | 1653 | ret = pci_set_power_state(pdev, PCI_D0); |
44410cd0 ID |
1654 | if (ret) { |
1655 | DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret); | |
1656 | goto out; | |
1657 | } | |
1658 | ||
1659 | /* | |
1660 | * Note that pci_enable_device() first enables any parent bridge | |
1661 | * device and only then sets the power state for this device. The | |
1662 | * bridge enabling is a nop though, since bridge devices are resumed | |
1663 | * first. The order of enabling power and enabling the device is | |
1664 | * imposed by the PCI core as described above, so here we preserve the | |
1665 | * same order for the freeze/thaw phases. | |
1666 | * | |
1667 | * TODO: eventually we should remove pci_disable_device() / | |
1668 | * pci_enable_device() from suspend/resume. Due to how they |
1669 | * depend on the device enable refcount we can't anyway depend on them | |
1670 | * disabling/enabling the device. | |
1671 | */ | |
52a05c30 | 1672 | if (pci_enable_device(pdev)) { |
bc87229f ID |
1673 | ret = -EIO; |
1674 | goto out; | |
1675 | } | |
84b79f8d | 1676 | |
52a05c30 | 1677 | pci_set_master(pdev); |
84b79f8d | 1678 | |
1f814dac ID |
1679 | disable_rpm_wakeref_asserts(dev_priv); |
1680 | ||
666a4537 | 1681 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
1a5df187 | 1682 | ret = vlv_resume_prepare(dev_priv, false); |
36d61e67 | 1683 | if (ret) |
ff0b187f DL |
1684 | DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", |
1685 | ret); | |
36d61e67 | 1686 | |
dc97997a | 1687 | intel_uncore_early_sanitize(dev_priv, true); |
efee833a | 1688 | |
dc97997a | 1689 | if (IS_BROXTON(dev_priv)) { |
da2f41d1 ID |
1690 | if (!dev_priv->suspended_to_idle) |
1691 | gen9_sanitize_dc_state(dev_priv); | |
507e126e | 1692 | bxt_disable_dc9(dev_priv); |
da2f41d1 | 1693 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
a9a6b73a | 1694 | hsw_disable_pc8(dev_priv); |
da2f41d1 | 1695 | } |
efee833a | 1696 | |
dc97997a | 1697 | intel_uncore_sanitize(dev_priv); |
bc87229f | 1698 | |
a7c8125f ID |
1699 | if (IS_BROXTON(dev_priv) || |
1700 | !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) | |
bc87229f ID |
1701 | intel_power_domains_init_hw(dev_priv, true); |
1702 | ||
6e35e8ab ID |
1703 | enable_rpm_wakeref_asserts(dev_priv); |
1704 | ||
bc87229f ID |
1705 | out: |
1706 | dev_priv->suspended_to_idle = false; | |
36d61e67 ID |
1707 | |
1708 | return ret; | |
76c4b250 ID |
1709 | } |
1710 | ||
1751fcf9 | 1711 | int i915_resume_switcheroo(struct drm_device *dev) |
76c4b250 | 1712 | { |
50a0072f | 1713 | int ret; |
76c4b250 | 1714 | |
097dd837 ID |
1715 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
1716 | return 0; | |
1717 | ||
5e365c39 | 1718 | ret = i915_drm_resume_early(dev); |
50a0072f ID |
1719 | if (ret) |
1720 | return ret; | |
1721 | ||
5a17514e ID |
1722 | return i915_drm_resume(dev); |
1723 | } | |
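/*
 * Sketch of the vga_switcheroo state-change handler the two entry points
 * above exist for; the callback shape is assumed here (the real handler
 * lives outside this listing) and the function name is illustrative:
 */
static void example_switcheroo_set_state(struct pci_dev *pdev,
					 enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* re-enter D0 before handing the device back to the driver */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}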
1724 | ||
11ed50ec | 1725 | /** |
f3953dcb | 1726 | * i915_reset - reset chip after a hang |
11ed50ec | 1727 | * @dev_priv: i915 device private to reset |
11ed50ec BG |
1728 | * |
1729 | * Reset the chip. Useful if a hang is detected. Returns zero on successful | |
1730 | * reset or otherwise an error code. | |
1731 | * | |
1732 | * Procedure is fairly simple: | |
1733 | * - reset the chip using the reset reg | |
1734 | * - re-init context state | |
1735 | * - re-init hardware status page | |
1736 | * - re-init ring buffer | |
1737 | * - re-init interrupt state | |
1738 | * - re-init display | |
1739 | */ | |
c033666a | 1740 | int i915_reset(struct drm_i915_private *dev_priv) |
11ed50ec | 1741 | { |
91c8a326 | 1742 | struct drm_device *dev = &dev_priv->drm; |
d98c52cf CW |
1743 | struct i915_gpu_error *error = &dev_priv->gpu_error; |
1744 | unsigned reset_counter; | |
0573ed4a | 1745 | int ret; |
11ed50ec | 1746 | |
d54a02c0 | 1747 | mutex_lock(&dev->struct_mutex); |
11ed50ec | 1748 | |
d98c52cf CW |
1749 | /* Clear any previous failed attempts at recovery. Time to try again. */ |
1750 | atomic_andnot(I915_WEDGED, &error->reset_counter); | |
77f01230 | 1751 | |
d98c52cf CW |
1752 | /* Clear the reset-in-progress flag and increment the reset epoch. */ |
1753 | reset_counter = atomic_inc_return(&error->reset_counter); | |
1754 | if (WARN_ON(__i915_reset_in_progress(reset_counter))) { | |
1755 | ret = -EIO; | |
1756 | goto error; | |
1757 | } | |
1758 | ||
7b4d3a16 CW |
1759 | pr_notice("drm/i915: Resetting chip after gpu hang\n"); |
1760 | ||
d98c52cf | 1761 | i915_gem_reset(dev); |
2e7c8ee7 | 1762 | |
dc97997a | 1763 | ret = intel_gpu_reset(dev_priv, ALL_ENGINES); |
0573ed4a | 1764 | if (ret) { |
804e59a8 CW |
1765 | if (ret != -ENODEV) |
1766 | DRM_ERROR("Failed to reset chip: %i\n", ret); | |
1767 | else | |
1768 | DRM_DEBUG_DRIVER("GPU reset disabled\n"); | |
d98c52cf | 1769 | goto error; |
11ed50ec BG |
1770 | } |
1771 | ||
1362b776 VS |
1772 | intel_overlay_reset(dev_priv); |
1773 | ||
11ed50ec BG |
1774 | /* Ok, now get things going again... */ |
1775 | ||
1776 | /* | |
1777 | * Everything depends on having the GTT running, so we need to start | |
1778 | * there. Fortunately we don't need to do this unless we reset the | |
1779 | * chip at a PCI level. | |
1780 | * | |
1781 | * Next we need to restore the context, but we don't use those | |
1782 | * yet either... | |
1783 | * | |
1784 | * Ring buffer needs to be re-initialized in the KMS case, or if X | |
1785 | * was running at the time of the reset (i.e. we weren't VT | |
1786 | * switched away). | |
1787 | */ | |
33d30a9c | 1788 | ret = i915_gem_init_hw(dev); |
33d30a9c DV |
1789 | if (ret) { |
1790 | DRM_ERROR("Failed hw init on reset %d\n", ret); | |
d98c52cf | 1791 | goto error; |
11ed50ec BG |
1792 | } |
1793 | ||
d98c52cf CW |
1794 | mutex_unlock(&dev->struct_mutex); |
1795 | ||
33d30a9c DV |
1796 | /* |
1797 | * rps/rc6 re-init is necessary to restore state lost after the | |
1798 | * reset and the re-install of gt irqs. Skip for ironlake per | |
1799 | * previous concerns that it doesn't respond well to some forms | |
1800 | * of re-init after reset. | |
1801 | */ | |
abc80abd | 1802 | intel_sanitize_gt_powersave(dev_priv); |
54b4f68f | 1803 | intel_autoenable_gt_powersave(dev_priv); |
33d30a9c | 1804 | |
11ed50ec | 1805 | return 0; |
d98c52cf CW |
1806 | |
1807 | error: | |
1808 | atomic_or(I915_WEDGED, &error->reset_counter); | |
1809 | mutex_unlock(&dev->struct_mutex); | |
1810 | return ret; | |
11ed50ec BG |
1811 | } |
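/*
 * Reader-side sketch of the reset_counter protocol used above (helper
 * names assumed to match the declarations in i915_drv.h): the low bits
 * carry the reset-in-progress/wedged flags and the bits above them the
 * reset epoch, so waiters can sample the state locklessly:
 *
 *	unsigned int reset = i915_reset_counter(&dev_priv->gpu_error);
 *	if (__i915_reset_in_progress(reset))
 *		...back off and retry once the epoch advances...
 */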
1812 | ||
c49d13ee | 1813 | static int i915_pm_suspend(struct device *kdev) |
112b715e | 1814 | { |
c49d13ee DW |
1815 | struct pci_dev *pdev = to_pci_dev(kdev); |
1816 | struct drm_device *dev = pci_get_drvdata(pdev); | |
112b715e | 1817 | |
c49d13ee DW |
1818 | if (!dev) { |
1819 | dev_err(kdev, "DRM not initialized, aborting suspend.\n"); | |
84b79f8d RW |
1820 | return -ENODEV; |
1821 | } | |
112b715e | 1822 | |
c49d13ee | 1823 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
5bcf719b DA |
1824 | return 0; |
1825 | ||
c49d13ee | 1826 | return i915_drm_suspend(dev); |
76c4b250 ID |
1827 | } |
1828 | ||
c49d13ee | 1829 | static int i915_pm_suspend_late(struct device *kdev) |
76c4b250 | 1830 | { |
c49d13ee | 1831 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
76c4b250 ID |
1832 | |
1833 | /* | |
c965d995 | 1834 | * We have a suspend ordering issue with the snd-hda driver also |
76c4b250 ID |
1835 | * requiring our device to be powered up. Due to the lack of a |
1836 | * parent/child relationship we currently solve this with a late |
1837 | * suspend hook. | |
1838 | * | |
1839 | * FIXME: This should be solved with a special hdmi sink device or | |
1840 | * similar so that power domains can be employed. | |
1841 | */ | |
c49d13ee | 1842 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
76c4b250 | 1843 | return 0; |
112b715e | 1844 | |
c49d13ee | 1845 | return i915_drm_suspend_late(dev, false); |
ab3be73f ID |
1846 | } |
1847 | ||
c49d13ee | 1848 | static int i915_pm_poweroff_late(struct device *kdev) |
ab3be73f | 1849 | { |
c49d13ee | 1850 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
ab3be73f | 1851 | |
c49d13ee | 1852 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
ab3be73f ID |
1853 | return 0; |
1854 | ||
c49d13ee | 1855 | return i915_drm_suspend_late(dev, true); |
cbda12d7 ZW |
1856 | } |
1857 | ||
c49d13ee | 1858 | static int i915_pm_resume_early(struct device *kdev) |
76c4b250 | 1859 | { |
c49d13ee | 1860 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
76c4b250 | 1861 | |
c49d13ee | 1862 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
097dd837 ID |
1863 | return 0; |
1864 | ||
c49d13ee | 1865 | return i915_drm_resume_early(dev); |
76c4b250 ID |
1866 | } |
1867 | ||
c49d13ee | 1868 | static int i915_pm_resume(struct device *kdev) |
cbda12d7 | 1869 | { |
c49d13ee | 1870 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
84b79f8d | 1871 | |
c49d13ee | 1872 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
097dd837 ID |
1873 | return 0; |
1874 | ||
c49d13ee | 1875 | return i915_drm_resume(dev); |
cbda12d7 ZW |
1876 | } |
1877 | ||
1f19ac2a | 1878 | /* freeze: before creating the hibernation_image */ |
c49d13ee | 1879 | static int i915_pm_freeze(struct device *kdev) |
1f19ac2a | 1880 | { |
c49d13ee | 1881 | return i915_pm_suspend(kdev); |
1f19ac2a CW |
1882 | } |
1883 | ||
c49d13ee | 1884 | static int i915_pm_freeze_late(struct device *kdev) |
1f19ac2a | 1885 | { |
461fb99c CW |
1886 | int ret; |
1887 | ||
c49d13ee | 1888 | ret = i915_pm_suspend_late(kdev); |
461fb99c CW |
1889 | if (ret) |
1890 | return ret; | |
1891 | ||
c49d13ee | 1892 | ret = i915_gem_freeze_late(kdev_to_i915(kdev)); |
461fb99c CW |
1893 | if (ret) |
1894 | return ret; | |
1895 | ||
1896 | return 0; | |
1f19ac2a CW |
1897 | } |
1898 | ||
1899 | /* thaw: called after creating the hibernation image, but before turning off. */ | |
c49d13ee | 1900 | static int i915_pm_thaw_early(struct device *kdev) |
1f19ac2a | 1901 | { |
c49d13ee | 1902 | return i915_pm_resume_early(kdev); |
1f19ac2a CW |
1903 | } |
1904 | ||
c49d13ee | 1905 | static int i915_pm_thaw(struct device *kdev) |
1f19ac2a | 1906 | { |
c49d13ee | 1907 | return i915_pm_resume(kdev); |
1f19ac2a CW |
1908 | } |
1909 | ||
1910 | /* restore: called after loading the hibernation image. */ | |
c49d13ee | 1911 | static int i915_pm_restore_early(struct device *kdev) |
1f19ac2a | 1912 | { |
c49d13ee | 1913 | return i915_pm_resume_early(kdev); |
1f19ac2a CW |
1914 | } |
1915 | ||
c49d13ee | 1916 | static int i915_pm_restore(struct device *kdev) |
1f19ac2a | 1917 | { |
c49d13ee | 1918 | return i915_pm_resume(kdev); |
1f19ac2a CW |
1919 | } |
1920 | ||
ddeea5b0 ID |
1921 | /* |
1922 | * Save all Gunit registers that may be lost after a D3 and a subsequent | |
1923 | * S0i[R123] transition. The list of registers needing a save/restore is | |
1924 | * defined in the VLV2_S0IXRegs document. This document marks all Gunit |
1925 | * registers in the following way: | |
1926 | * - Driver: saved/restored by the driver | |
1927 | * - Punit : saved/restored by the Punit firmware | |
1928 | * - No, w/o marking: no need to save/restore, since the register is R/O or | |
1929 | * used internally by the HW in a way that doesn't depend on |
1930 | * keeping the content across a suspend/resume. |
1931 | * - Debug : used for debugging | |
1932 | * | |
1933 | * We save/restore all registers marked with 'Driver', with the following | |
1934 | * exceptions: | |
1935 | * - Registers out of use, including also registers marked with 'Debug'. | |
1936 | * These have no effect on the driver's operation, so we don't save/restore | |
1937 | * them to reduce the overhead. | |
1938 | * - Registers that are fully setup by an initialization function called from | |
1939 | * the resume path. For example many clock gating and RPS/RC6 registers. | |
1940 | * - Registers that provide the right functionality with their reset defaults. | |
1941 | * | |
1942 | * TODO: Except for registers that based on the above 3 criteria can be safely | |
1943 | * ignored, we save/restore all others, practically treating the HW context as | |
1944 | * a black-box for the driver. Further investigation is needed to reduce the | |
1945 | * saved/restored registers even further, by following the same 3 criteria. | |
1946 | */ | |
1947 | static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) | |
1948 | { | |
1949 | struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; | |
1950 | int i; | |
1951 | ||
1952 | /* GAM 0x4000-0x4770 */ | |
1953 | s->wr_watermark = I915_READ(GEN7_WR_WATERMARK); | |
1954 | s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL); | |
1955 | s->arb_mode = I915_READ(ARB_MODE); | |
1956 | s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0); | |
1957 | s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); | |
1958 | ||
1959 | for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) | |
22dfe79f | 1960 | s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i)); |
ddeea5b0 ID |
1961 | |
1962 | s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); | |
b5f1c97f | 1963 | s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); |
ddeea5b0 ID |
1964 | |
1965 | s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); | |
1966 | s->ecochk = I915_READ(GAM_ECOCHK); | |
1967 | s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7); | |
1968 | s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7); | |
1969 | ||
1970 | s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR); | |
1971 | ||
1972 | /* MBC 0x9024-0x91D0, 0x8500 */ | |
1973 | s->g3dctl = I915_READ(VLV_G3DCTL); | |
1974 | s->gsckgctl = I915_READ(VLV_GSCKGCTL); | |
1975 | s->mbctl = I915_READ(GEN6_MBCTL); | |
1976 | ||
1977 | /* GCP 0x9400-0x9424, 0x8100-0x810C */ | |
1978 | s->ucgctl1 = I915_READ(GEN6_UCGCTL1); | |
1979 | s->ucgctl3 = I915_READ(GEN6_UCGCTL3); | |
1980 | s->rcgctl1 = I915_READ(GEN6_RCGCTL1); | |
1981 | s->rcgctl2 = I915_READ(GEN6_RCGCTL2); | |
1982 | s->rstctl = I915_READ(GEN6_RSTCTL); | |
1983 | s->misccpctl = I915_READ(GEN7_MISCCPCTL); | |
1984 | ||
1985 | /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ | |
1986 | s->gfxpause = I915_READ(GEN6_GFXPAUSE); | |
1987 | s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC); | |
1988 | s->rpdeuc = I915_READ(GEN6_RPDEUC); | |
1989 | s->ecobus = I915_READ(ECOBUS); | |
1990 | s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL); | |
1991 | s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT); | |
1992 | s->rp_deucsw = I915_READ(GEN6_RPDEUCSW); | |
1993 | s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR); | |
1994 | s->rcedata = I915_READ(VLV_RCEDATA); | |
1995 | s->spare2gh = I915_READ(VLV_SPAREG2H); | |
1996 | ||
1997 | /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ | |
1998 | s->gt_imr = I915_READ(GTIMR); | |
1999 | s->gt_ier = I915_READ(GTIER); | |
2000 | s->pm_imr = I915_READ(GEN6_PMIMR); | |
2001 | s->pm_ier = I915_READ(GEN6_PMIER); | |
2002 | ||
2003 | for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) | |
22dfe79f | 2004 | s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i)); |
ddeea5b0 ID |
2005 | |
2006 | /* GT SA CZ domain, 0x100000-0x138124 */ | |
2007 | s->tilectl = I915_READ(TILECTL); | |
2008 | s->gt_fifoctl = I915_READ(GTFIFOCTL); | |
2009 | s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL); | |
2010 | s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG); | |
2011 | s->pmwgicz = I915_READ(VLV_PMWGICZ); | |
2012 | ||
2013 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ | |
2014 | s->gu_ctl0 = I915_READ(VLV_GU_CTL0); | |
2015 | s->gu_ctl1 = I915_READ(VLV_GU_CTL1); | |
9c25210f | 2016 | s->pcbr = I915_READ(VLV_PCBR); |
ddeea5b0 ID |
2017 | s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); |
2018 | ||
2019 | /* | |
2020 | * Not saving any of: | |
2021 | * DFT, 0x9800-0x9EC0 | |
2022 | * SARB, 0xB000-0xB1FC | |
2023 | * GAC, 0x5208-0x524C, 0x14000-0x14C000 | |
2024 | * PCI CFG | |
2025 | */ | |
2026 | } | |
2027 | ||
2028 | static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) | |
2029 | { | |
2030 | struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; | |
2031 | u32 val; | |
2032 | int i; | |
2033 | ||
2034 | /* GAM 0x4000-0x4770 */ | |
2035 | I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark); | |
2036 | I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl); | |
2037 | I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16)); | |
2038 | I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0); | |
2039 | I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); | |
2040 | ||
2041 | for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) | |
22dfe79f | 2042 | I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]); |
ddeea5b0 ID |
2043 | |
2044 | I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); | |
b5f1c97f | 2045 | I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); |
ddeea5b0 ID |
2046 | |
2047 | I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); | |
2048 | I915_WRITE(GAM_ECOCHK, s->ecochk); | |
2049 | I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp); | |
2050 | I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp); | |
2051 | ||
2052 | I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr); | |
2053 | ||
2054 | /* MBC 0x9024-0x91D0, 0x8500 */ | |
2055 | I915_WRITE(VLV_G3DCTL, s->g3dctl); | |
2056 | I915_WRITE(VLV_GSCKGCTL, s->gsckgctl); | |
2057 | I915_WRITE(GEN6_MBCTL, s->mbctl); | |
2058 | ||
2059 | /* GCP 0x9400-0x9424, 0x8100-0x810C */ | |
2060 | I915_WRITE(GEN6_UCGCTL1, s->ucgctl1); | |
2061 | I915_WRITE(GEN6_UCGCTL3, s->ucgctl3); | |
2062 | I915_WRITE(GEN6_RCGCTL1, s->rcgctl1); | |
2063 | I915_WRITE(GEN6_RCGCTL2, s->rcgctl2); | |
2064 | I915_WRITE(GEN6_RSTCTL, s->rstctl); | |
2065 | I915_WRITE(GEN7_MISCCPCTL, s->misccpctl); | |
2066 | ||
2067 | /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ | |
2068 | I915_WRITE(GEN6_GFXPAUSE, s->gfxpause); | |
2069 | I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc); | |
2070 | I915_WRITE(GEN6_RPDEUC, s->rpdeuc); | |
2071 | I915_WRITE(ECOBUS, s->ecobus); | |
2072 | I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl); | |
2073 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout); |
2074 | I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw); | |
2075 | I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr); | |
2076 | I915_WRITE(VLV_RCEDATA, s->rcedata); | |
2077 | I915_WRITE(VLV_SPAREG2H, s->spare2gh); | |
2078 | ||
2079 | /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ | |
2080 | I915_WRITE(GTIMR, s->gt_imr); | |
2081 | I915_WRITE(GTIER, s->gt_ier); | |
2082 | I915_WRITE(GEN6_PMIMR, s->pm_imr); | |
2083 | I915_WRITE(GEN6_PMIER, s->pm_ier); | |
2084 | ||
2085 | for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) | |
22dfe79f | 2086 | I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]); |
ddeea5b0 ID |
2087 | |
2088 | /* GT SA CZ domain, 0x100000-0x138124 */ | |
2089 | I915_WRITE(TILECTL, s->tilectl); | |
2090 | I915_WRITE(GTFIFOCTL, s->gt_fifoctl); | |
2091 | /* | |
2092 | * Preserve the GT allow wake and GFX force clock bits; they are not |
2093 | * restored here, as they are used to control the s0ix suspend/resume |
2094 | * sequence by the caller. | |
2095 | */ | |
2096 | val = I915_READ(VLV_GTLC_WAKE_CTRL); | |
2097 | val &= VLV_GTLC_ALLOWWAKEREQ; | |
2098 | val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ; | |
2099 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); | |
2100 | ||
2101 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); | |
2102 | val &= VLV_GFX_CLK_FORCE_ON_BIT; | |
2103 | val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT; | |
2104 | I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); | |
2105 | ||
2106 | I915_WRITE(VLV_PMWGICZ, s->pmwgicz); | |
2107 | ||
2108 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ | |
2109 | I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); | |
2110 | I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); | |
9c25210f | 2111 | I915_WRITE(VLV_PCBR, s->pcbr); |
ddeea5b0 ID |
2112 | I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); |
2113 | } | |
2114 | ||
650ad970 ID |
2115 | int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) |
2116 | { | |
2117 | u32 val; | |
2118 | int err; | |
2119 | ||
650ad970 ID |
2120 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); |
2121 | val &= ~VLV_GFX_CLK_FORCE_ON_BIT; | |
2122 | if (force_on) | |
2123 | val |= VLV_GFX_CLK_FORCE_ON_BIT; | |
2124 | I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); | |
2125 | ||
2126 | if (!force_on) | |
2127 | return 0; | |
2128 | ||
c6ddc5f3 CW |
2129 | err = intel_wait_for_register(dev_priv, |
2130 | VLV_GTLC_SURVIVABILITY_REG, | |
2131 | VLV_GFX_CLK_STATUS_BIT, | |
2132 | VLV_GFX_CLK_STATUS_BIT, | |
2133 | 20); | |
650ad970 ID |
2134 | if (err) |
2135 | DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", | |
2136 | I915_READ(VLV_GTLC_SURVIVABILITY_REG)); | |
2137 | ||
2138 | return err; | |
650ad970 ID |
2139 | } |
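/*
 * Usage pattern (as in vlv_suspend_complete() further down): callers
 * bracket their Gunit register accesses with a force-on/force-off pair:
 *
 *	err = vlv_force_gfx_clock(dev_priv, true);
 *	...save or restore Gunit state...
 *	vlv_force_gfx_clock(dev_priv, false);
 *
 * intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms), used
 * above, polls until (I915_READ(reg) & mask) == value and is expected to
 * return -ETIMEDOUT if the timeout expires first.
 */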
2140 | ||
ddeea5b0 ID |
2141 | static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) |
2142 | { | |
2143 | u32 val; | |
2144 | int err = 0; | |
2145 | ||
2146 | val = I915_READ(VLV_GTLC_WAKE_CTRL); | |
2147 | val &= ~VLV_GTLC_ALLOWWAKEREQ; | |
2148 | if (allow) | |
2149 | val |= VLV_GTLC_ALLOWWAKEREQ; | |
2150 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); | |
2151 | POSTING_READ(VLV_GTLC_WAKE_CTRL); | |
2152 | ||
b2736695 CW |
2153 | err = intel_wait_for_register(dev_priv, |
2154 | VLV_GTLC_PW_STATUS, | |
2155 | VLV_GTLC_ALLOWWAKEACK, | |
2156 | allow, | |
2157 | 1); | |
ddeea5b0 ID |
2158 | if (err) |
2159 | DRM_ERROR("timeout disabling GT waking\n"); | |
b2736695 | 2160 | |
ddeea5b0 | 2161 | return err; |
ddeea5b0 ID |
2162 | } |
2163 | ||
2164 | static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, | |
2165 | bool wait_for_on) | |
2166 | { | |
2167 | u32 mask; | |
2168 | u32 val; | |
2169 | int err; | |
2170 | ||
2171 | mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; | |
2172 | val = wait_for_on ? mask : 0; | |
41ce405e | 2173 | if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) |
ddeea5b0 ID |
2174 | return 0; |
2175 | ||
2176 | DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", | |
87ad3212 JN |
2177 | onoff(wait_for_on), |
2178 | I915_READ(VLV_GTLC_PW_STATUS)); | |
ddeea5b0 ID |
2179 | |
2180 | /* | |
2181 | * RC6 transitioning can be delayed up to 2 msec (see | |
2182 | * valleyview_enable_rps), use 3 msec for safety. | |
2183 | */ | |
41ce405e CW |
2184 | err = intel_wait_for_register(dev_priv, |
2185 | VLV_GTLC_PW_STATUS, mask, val, | |
2186 | 3); | |
ddeea5b0 ID |
2187 | if (err) |
2188 | DRM_ERROR("timeout waiting for GT wells to go %s\n", | |
87ad3212 | 2189 | onoff(wait_for_on)); |
ddeea5b0 ID |
2190 | |
2191 | return err; | |
ddeea5b0 ID |
2192 | } |
2193 | ||
2194 | static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) | |
2195 | { | |
2196 | if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) | |
2197 | return; | |
2198 | ||
6fa283b0 | 2199 | DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n"); |
ddeea5b0 ID |
2200 | I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); |
2201 | } | |
2202 | ||
ebc32824 | 2203 | static int vlv_suspend_complete(struct drm_i915_private *dev_priv) |
ddeea5b0 ID |
2204 | { |
2205 | u32 mask; | |
2206 | int err; | |
2207 | ||
2208 | /* | |
2209 | * Bspec defines the following GT power well "on" flags as debug only, so |
2210 | * don't treat them as hard failures. | |
2211 | */ | |
2212 | (void)vlv_wait_for_gt_wells(dev_priv, false); | |
2213 | ||
2214 | mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; | |
2215 | WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); | |
2216 | ||
2217 | vlv_check_no_gt_access(dev_priv); | |
2218 | ||
2219 | err = vlv_force_gfx_clock(dev_priv, true); | |
2220 | if (err) | |
2221 | goto err1; | |
2222 | ||
2223 | err = vlv_allow_gt_wake(dev_priv, false); | |
2224 | if (err) | |
2225 | goto err2; | |
98711167 | 2226 | |
2d1fe073 | 2227 | if (!IS_CHERRYVIEW(dev_priv)) |
98711167 | 2228 | vlv_save_gunit_s0ix_state(dev_priv); |
ddeea5b0 ID |
2229 | |
2230 | err = vlv_force_gfx_clock(dev_priv, false); | |
2231 | if (err) | |
2232 | goto err2; | |
2233 | ||
2234 | return 0; | |
2235 | ||
2236 | err2: | |
2237 | /* For safety always re-enable waking and disable gfx clock forcing */ | |
2238 | vlv_allow_gt_wake(dev_priv, true); | |
2239 | err1: | |
2240 | vlv_force_gfx_clock(dev_priv, false); | |
2241 | ||
2242 | return err; | |
2243 | } | |
2244 | ||
016970be SK |
2245 | static int vlv_resume_prepare(struct drm_i915_private *dev_priv, |
2246 | bool rpm_resume) | |
ddeea5b0 | 2247 | { |
91c8a326 | 2248 | struct drm_device *dev = &dev_priv->drm; |
ddeea5b0 ID |
2249 | int err; |
2250 | int ret; | |
2251 | ||
2252 | /* | |
2253 | * If any of the steps fail just try to continue, that's the best we | |
2254 | * can do at this point. Return the first error code (which will also | |
2255 | * leave RPM permanently disabled). | |
2256 | */ | |
2257 | ret = vlv_force_gfx_clock(dev_priv, true); | |
2258 | ||
2d1fe073 | 2259 | if (!IS_CHERRYVIEW(dev_priv)) |
98711167 | 2260 | vlv_restore_gunit_s0ix_state(dev_priv); |
ddeea5b0 ID |
2261 | |
2262 | err = vlv_allow_gt_wake(dev_priv, true); | |
2263 | if (!ret) | |
2264 | ret = err; | |
2265 | ||
2266 | err = vlv_force_gfx_clock(dev_priv, false); | |
2267 | if (!ret) | |
2268 | ret = err; | |
2269 | ||
2270 | vlv_check_no_gt_access(dev_priv); | |
2271 | ||
016970be SK |
2272 | if (rpm_resume) { |
2273 | intel_init_clock_gating(dev); | |
2274 | i915_gem_restore_fences(dev); | |
2275 | } | |
ddeea5b0 ID |
2276 | |
2277 | return ret; | |
2278 | } | |
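/*
 * Note on the error handling above: every step runs even after a prior
 * failure, and "if (!ret) ret = err;" keeps only the first error, so a
 * partial resume still executes as much of the sequence as possible
 * while reporting the earliest fault to the caller.
 */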
2279 | ||
c49d13ee | 2280 | static int intel_runtime_suspend(struct device *kdev) |
8a187455 | 2281 | { |
c49d13ee | 2282 | struct pci_dev *pdev = to_pci_dev(kdev); |
8a187455 | 2283 | struct drm_device *dev = pci_get_drvdata(pdev); |
fac5e23e | 2284 | struct drm_i915_private *dev_priv = to_i915(dev); |
0ab9cfeb | 2285 | int ret; |
8a187455 | 2286 | |
dc97997a | 2287 | if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6()))) |
c6df39b5 ID |
2288 | return -ENODEV; |
2289 | ||
604effb7 ID |
2290 | if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) |
2291 | return -ENODEV; | |
2292 | ||
8a187455 PZ |
2293 | DRM_DEBUG_KMS("Suspending device\n"); |
2294 | ||
d6102977 ID |
2295 | /* |
2296 | * We could deadlock here in case another thread holding struct_mutex | |
2297 | * calls RPM suspend concurrently, since the RPM suspend will wait | |
2298 | * first for this RPM suspend to finish. In this case the concurrent | |
2299 | * RPM resume will be followed by its RPM suspend counterpart. Still | |
2300 | * for consistency return -EAGAIN, which will reschedule this suspend. | |
2301 | */ | |
2302 | if (!mutex_trylock(&dev->struct_mutex)) { | |
2303 | DRM_DEBUG_KMS("device lock contention, deferring suspend\n"); |
2304 | /* | |
2305 | * Bump the expiration timestamp, otherwise the suspend won't | |
2306 | * be rescheduled. | |
2307 | */ | |
c49d13ee | 2308 | pm_runtime_mark_last_busy(kdev); |
d6102977 ID |
2309 | |
2310 | return -EAGAIN; | |
2311 | } | |
1f814dac ID |
2312 | |
2313 | disable_rpm_wakeref_asserts(dev_priv); | |
2314 | ||
d6102977 ID |
2315 | /* |
2316 | * We are safe here against re-faults, since the fault handler takes | |
2317 | * an RPM reference. | |
2318 | */ | |
2319 | i915_gem_release_all_mmaps(dev_priv); | |
2320 | mutex_unlock(&dev->struct_mutex); | |
2321 | ||
a1c41994 AD |
2322 | intel_guc_suspend(dev); |
2323 | ||
2eb5252e | 2324 | intel_runtime_pm_disable_interrupts(dev_priv); |
b5478bcd | 2325 | |
507e126e ID |
2326 | ret = 0; |
2327 | if (IS_BROXTON(dev_priv)) { | |
2328 | bxt_display_core_uninit(dev_priv); | |
2329 | bxt_enable_dc9(dev_priv); | |
2330 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { | |
2331 | hsw_enable_pc8(dev_priv); | |
2332 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | |
2333 | ret = vlv_suspend_complete(dev_priv); | |
2334 | } | |
2335 | ||
0ab9cfeb ID |
2336 | if (ret) { |
2337 | DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); | |
b963291c | 2338 | intel_runtime_pm_enable_interrupts(dev_priv); |
0ab9cfeb | 2339 | |
1f814dac ID |
2340 | enable_rpm_wakeref_asserts(dev_priv); |
2341 | ||
0ab9cfeb ID |
2342 | return ret; |
2343 | } | |
a8a8bd54 | 2344 | |
dc97997a | 2345 | intel_uncore_forcewake_reset(dev_priv, false); |
1f814dac ID |
2346 | |
2347 | enable_rpm_wakeref_asserts(dev_priv); | |
2348 | WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); | |
55ec45c2 | 2349 | |
bc3b9346 | 2350 | if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) |
55ec45c2 MK |
2351 | DRM_ERROR("Unclaimed access detected prior to suspending\n"); |
2352 | ||
8a187455 | 2353 | dev_priv->pm.suspended = true; |
1fb2362b KCA |
2354 | |
2355 | /* | |
c8a0bd42 PZ |
2356 | * FIXME: We really should find a document that references the arguments |
2357 | * used below! | |
1fb2362b | 2358 | */ |
6f9f4b7a | 2359 | if (IS_BROADWELL(dev_priv)) { |
d37ae19a PZ |
2360 | /* |
2361 | * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop | |
2362 | * being detected, and the call we do at intel_runtime_resume() | |
2363 | * won't be able to restore them. Since PCI_D3hot matches the | |
2364 | * actual specification and appears to be working, use it. | |
2365 | */ | |
6f9f4b7a | 2366 | intel_opregion_notify_adapter(dev_priv, PCI_D3hot); |
d37ae19a | 2367 | } else { |
c8a0bd42 PZ |
2368 | /* |
2369 | * current versions of firmware which depend on this opregion | |
2370 | * notification have repurposed the D1 definition to mean | |
2371 | * "runtime suspended" vs. what you would normally expect (D3) | |
2372 | * to distinguish it from notifications that might be sent via | |
2373 | * the suspend path. | |
2374 | */ | |
6f9f4b7a | 2375 | intel_opregion_notify_adapter(dev_priv, PCI_D1); |
c8a0bd42 | 2376 | } |
8a187455 | 2377 | |
59bad947 | 2378 | assert_forcewakes_inactive(dev_priv); |
dc9fb09c | 2379 | |
19625e85 L |
2380 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
2381 | intel_hpd_poll_init(dev_priv); | |
2382 | ||
a8a8bd54 | 2383 | DRM_DEBUG_KMS("Device suspended\n"); |
8a187455 PZ |
2384 | return 0; |
2385 | } | |
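/*
 * Sketch of the wakeref pattern this callback pairs with (helper names
 * as used elsewhere in the driver; simplified):
 *
 *	intel_runtime_pm_get(dev_priv);
 *	...MMIO or GEM work; runtime suspend is held off here...
 *	intel_runtime_pm_put(dev_priv);
 *
 * Only once every get has been balanced by a put can the PM core invoke
 * intel_runtime_suspend() above, and the wakeref asserts toggled at its
 * entry/exit catch hardware access that slips outside such a pair.
 */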
2386 | ||
c49d13ee | 2387 | static int intel_runtime_resume(struct device *kdev) |
8a187455 | 2388 | { |
c49d13ee | 2389 | struct pci_dev *pdev = to_pci_dev(kdev); |
8a187455 | 2390 | struct drm_device *dev = pci_get_drvdata(pdev); |
fac5e23e | 2391 | struct drm_i915_private *dev_priv = to_i915(dev); |
1a5df187 | 2392 | int ret = 0; |
8a187455 | 2393 | |
604effb7 ID |
2394 | if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) |
2395 | return -ENODEV; | |
8a187455 PZ |
2396 | |
2397 | DRM_DEBUG_KMS("Resuming device\n"); | |
2398 | ||
1f814dac ID |
2399 | WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); |
2400 | disable_rpm_wakeref_asserts(dev_priv); | |
2401 | ||
6f9f4b7a | 2402 | intel_opregion_notify_adapter(dev_priv, PCI_D0); |
8a187455 | 2403 | dev_priv->pm.suspended = false; |
55ec45c2 MK |
2404 | if (intel_uncore_unclaimed_mmio(dev_priv)) |
2405 | DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); | |
8a187455 | 2406 | |
a1c41994 AD |
2407 | intel_guc_resume(dev); |
2408 | ||
1a5df187 PZ |
2409 | if (IS_GEN6(dev_priv)) |
2410 | intel_init_pch_refclk(dev); | |
31335cec | 2411 | |
507e126e ID |
2412 | if (IS_BROXTON(dev_priv)) { |
2413 | bxt_disable_dc9(dev_priv); | |
2414 | bxt_display_core_init(dev_priv, true); | |
f62c79b3 ID |
2415 | if (dev_priv->csr.dmc_payload && |
2416 | (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) | |
2417 | gen9_enable_dc5(dev_priv); | |
507e126e | 2418 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
1a5df187 | 2419 | hsw_disable_pc8(dev_priv); |
507e126e | 2420 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
1a5df187 | 2421 | ret = vlv_resume_prepare(dev_priv, true); |
507e126e | 2422 | } |
1a5df187 | 2423 | |
0ab9cfeb ID |
2424 | /* |
2425 | * No point in rolling back things in case of an error, as the best |
2426 | * we can do is to hope that things will still work (and disable RPM). | |
2427 | */ | |
92b806d3 | 2428 | i915_gem_init_swizzling(dev); |
92b806d3 | 2429 | |
b963291c | 2430 | intel_runtime_pm_enable_interrupts(dev_priv); |
08d8a232 VS |
2431 | |
2432 | /* | |
2433 | * On VLV/CHV display interrupts are part of the display | |
2434 | * power well, so hpd is reinitialized from there. For | |
2435 | * everyone else do it here. | |
2436 | */ | |
666a4537 | 2437 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
08d8a232 VS |
2438 | intel_hpd_init(dev_priv); |
2439 | ||
1f814dac ID |
2440 | enable_rpm_wakeref_asserts(dev_priv); |
2441 | ||
0ab9cfeb ID |
2442 | if (ret) |
2443 | DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); | |
2444 | else | |
2445 | DRM_DEBUG_KMS("Device resumed\n"); | |
2446 | ||
2447 | return ret; | |
8a187455 PZ |
2448 | } |
2449 | ||
42f5551d | 2450 | const struct dev_pm_ops i915_pm_ops = { |
5545dbbf ID |
2451 | /* |
2452 | * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, | |
2453 | * PMSG_RESUME] | |
2454 | */ | |
0206e353 | 2455 | .suspend = i915_pm_suspend, |
76c4b250 ID |
2456 | .suspend_late = i915_pm_suspend_late, |
2457 | .resume_early = i915_pm_resume_early, | |
0206e353 | 2458 | .resume = i915_pm_resume, |
5545dbbf ID |
2459 | |
2460 | /* | |
2461 | * S4 event handlers | |
2462 | * @freeze, @freeze_late : called (1) before creating the | |
2463 | * hibernation image [PMSG_FREEZE] and | |
2464 | * (2) after rebooting, before restoring | |
2465 | * the image [PMSG_QUIESCE] | |
2466 | * @thaw, @thaw_early : called (1) after creating the hibernation | |
2467 | * image, before writing it [PMSG_THAW] | |
2468 | * and (2) after failing to create or | |
2469 | * restore the image [PMSG_RECOVER] | |
2470 | * @poweroff, @poweroff_late: called after writing the hibernation | |
2471 | * image, before rebooting [PMSG_HIBERNATE] | |
2472 | * @restore, @restore_early : called after rebooting and restoring the | |
2473 | * hibernation image [PMSG_RESTORE] | |
2474 | */ | |
1f19ac2a CW |
2475 | .freeze = i915_pm_freeze, |
2476 | .freeze_late = i915_pm_freeze_late, | |
2477 | .thaw_early = i915_pm_thaw_early, | |
2478 | .thaw = i915_pm_thaw, | |
36d61e67 | 2479 | .poweroff = i915_pm_suspend, |
ab3be73f | 2480 | .poweroff_late = i915_pm_poweroff_late, |
1f19ac2a CW |
2481 | .restore_early = i915_pm_restore_early, |
2482 | .restore = i915_pm_restore, | |
5545dbbf ID |
2483 | |
2484 | /* S0ix (via runtime suspend) event handlers */ | |
97bea207 PZ |
2485 | .runtime_suspend = intel_runtime_suspend, |
2486 | .runtime_resume = intel_runtime_resume, | |
cbda12d7 ZW |
2487 | }; |
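/*
 * Sketch of how i915_pm_ops is expected to be consumed; the PCI driver
 * definition lives outside this listing, and pciidlist, i915_pci_probe
 * and i915_pci_remove are assumed names:
 *
 *	static struct pci_driver i915_pci_driver = {
 *		.name = DRIVER_NAME,
 *		.id_table = pciidlist,
 *		.probe = i915_pci_probe,
 *		.remove = i915_pci_remove,
 *		.driver.pm = &i915_pm_ops,
 *	};
 */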
2488 | ||
78b68556 | 2489 | static const struct vm_operations_struct i915_gem_vm_ops = { |
de151cf6 | 2490 | .fault = i915_gem_fault, |
ab00b3e5 JB |
2491 | .open = drm_gem_vm_open, |
2492 | .close = drm_gem_vm_close, | |
de151cf6 JB |
2493 | }; |
2494 | ||
e08e96de AV |
2495 | static const struct file_operations i915_driver_fops = { |
2496 | .owner = THIS_MODULE, | |
2497 | .open = drm_open, | |
2498 | .release = drm_release, | |
2499 | .unlocked_ioctl = drm_ioctl, | |
2500 | .mmap = drm_gem_mmap, | |
2501 | .poll = drm_poll, | |
e08e96de AV |
2502 | .read = drm_read, |
2503 | #ifdef CONFIG_COMPAT | |
2504 | .compat_ioctl = i915_compat_ioctl, | |
2505 | #endif | |
2506 | .llseek = noop_llseek, | |
2507 | }; | |
2508 | ||
0673ad47 CW |
2509 | static int |
2510 | i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, | |
2511 | struct drm_file *file) | |
2512 | { | |
2513 | return -ENODEV; | |
2514 | } | |
2515 | ||
2516 | static const struct drm_ioctl_desc i915_ioctls[] = { | |
2517 | DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2518 | DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), | |
2519 | DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), | |
2520 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), | |
2521 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), | |
2522 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), | |
2523 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), | |
2524 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2525 | DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), | |
2526 | DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), | |
2527 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2528 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), | |
2529 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2530 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2531 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), | |
2532 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), | |
2533 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2534 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2535 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), | |
2536 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), | |
2537 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | |
2538 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | |
2539 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), | |
2540 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), | |
2541 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), | |
2542 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), | |
2543 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2544 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2545 | DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), | |
2546 | DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), | |
2547 | DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), | |
2548 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), | |
2549 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), | |
2550 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), | |
2551 | DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), | |
2552 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW), | |
2553 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW), | |
2554 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), | |
2555 | DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), | |
2556 | DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), | |
2557 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | |
2558 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | |
2559 | DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), | |
2560 | DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), | |
2561 | DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), | |
2562 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), | |
2563 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), | |
2564 | DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), | |
2565 | DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), | |
2566 | DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), | |
2567 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), | |
2568 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), | |
2569 | }; | |
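/*
 * Userspace-side sketch (libdrm API assumed) of reaching an entry in the
 * table above, here I915_GETPARAM:
 *
 *	#include <xf86drm.h>
 *	#include <i915_drm.h>
 *
 *	int has_bsd = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_HAS_BSD,
 *		.value = &has_bsd,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		...has_bsd now reports BSD ring support...
 */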
2570 | ||
1da177e4 | 2571 | static struct drm_driver driver = { |
0c54781b MW |
2572 | /* Don't use MTRRs here; the Xserver or userspace app should |
2573 | * deal with them for Intel hardware. | |
792d2b9a | 2574 | */ |
673a394b | 2575 | .driver_features = |
10ba5012 | 2576 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | |
1751fcf9 | 2577 | DRIVER_RENDER | DRIVER_MODESET, |
673a394b | 2578 | .open = i915_driver_open, |
22eae947 DA |
2579 | .lastclose = i915_driver_lastclose, |
2580 | .preclose = i915_driver_preclose, | |
673a394b | 2581 | .postclose = i915_driver_postclose, |
915b4d11 | 2582 | .set_busid = drm_pci_set_busid, |
d8e29209 | 2583 | |
b1f788c6 | 2584 | .gem_close_object = i915_gem_close_object, |
673a394b | 2585 | .gem_free_object = i915_gem_free_object, |
de151cf6 | 2586 | .gem_vm_ops = &i915_gem_vm_ops, |
1286ff73 DV |
2587 | |
2588 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | |
2589 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | |
2590 | .gem_prime_export = i915_gem_prime_export, | |
2591 | .gem_prime_import = i915_gem_prime_import, | |
2592 | ||
ff72145b | 2593 | .dumb_create = i915_gem_dumb_create, |
da6b51d0 | 2594 | .dumb_map_offset = i915_gem_mmap_gtt, |
43387b37 | 2595 | .dumb_destroy = drm_gem_dumb_destroy, |
1da177e4 | 2596 | .ioctls = i915_ioctls, |
0673ad47 | 2597 | .num_ioctls = ARRAY_SIZE(i915_ioctls), |
e08e96de | 2598 | .fops = &i915_driver_fops, |
22eae947 DA |
2599 | .name = DRIVER_NAME, |
2600 | .desc = DRIVER_DESC, | |
2601 | .date = DRIVER_DATE, | |
2602 | .major = DRIVER_MAJOR, | |
2603 | .minor = DRIVER_MINOR, | |
2604 | .patchlevel = DRIVER_PATCHLEVEL, | |
1da177e4 | 2605 | }; |