drm/i915: LPT:LP needs port A HPD enabled in both north and south
drivers/gpu/drm/i915/i915_irq.c
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
        [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
        [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
        [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

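/*
 * A short refresher on the register trio used throughout this file (an
 * editorial note, summarizing the behaviour visible in the macros below):
 * IER enables an interrupt source, IMR masks it (a set bit in IMR blocks
 * delivery), and IIR latches pending events, which are cleared by writing
 * the set bits back. The reset macros therefore mask everything in IMR
 * first and clear IIR twice, while the init macros insist on a clean IIR
 * before unmasking anything.
 */
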
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), 0); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
        I915_WRITE(type##IMR, 0xffffffff); \
        POSTING_READ(type##IMR); \
        I915_WRITE(type##IER, 0); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
        u32 val = I915_READ(reg); \
        if (val) { \
                WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
                     (reg), val); \
                I915_WRITE((reg), 0xffffffff); \
                POSTING_READ(reg); \
                I915_WRITE((reg), 0xffffffff); \
                POSTING_READ(reg); \
        } \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
        GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
        I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
        GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
        I915_WRITE(type##IER, (ier_val)); \
        I915_WRITE(type##IMR, (imr_val)); \
        POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

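/*
 * The ilk_update_display_irq()/ilk_update_gt_irq()/snb_update_pm_irq()
 * helpers below all follow the same pattern: @interrupt_mask selects which
 * bits to touch and @enabled_irq_mask says which of those to unmask. Since
 * a set IMR bit disables an interrupt,
 *
 *        new_val &= ~interrupt_mask;
 *        new_val |= (~enabled_irq_mask & interrupt_mask);
 *
 * clears the mask bit for the enabled interrupts and sets it for the rest.
 * Calling with (bit, bit) thus enables an interrupt and (bit, 0) disables
 * it, which is exactly what the enable/disable wrappers do.
 */
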
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
                                   uint32_t interrupt_mask,
                                   uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        assert_spin_locked(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->irq_mask) {
                dev_priv->irq_mask = new_val;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        ilk_update_display_irq(dev_priv, mask, mask);
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        ilk_update_display_irq(dev_priv, mask, 0);
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        assert_spin_locked(&dev_priv->irq_lock);

        new_val = dev_priv->pm_irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_irq_mask) {
                dev_priv->pm_irq_mask = new_val;
                I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
                POSTING_READ(gen6_pm_imr(dev_priv));
        }
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
                                  uint32_t mask)
{
        snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        __gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t reg = gen6_pm_iir(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        I915_WRITE(reg, dev_priv->pm_rps_events);
        I915_WRITE(reg, dev_priv->pm_rps_events);
        POSTING_READ(reg);
        dev_priv->rps.pm_iir = 0;
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        spin_lock_irq(&dev_priv->irq_lock);

        WARN_ON(dev_priv->rps.pm_iir);
        WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
        dev_priv->rps.interrupts_enabled = true;
        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
                   dev_priv->pm_rps_events);
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

        spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
        /*
         * SNB and IVB will hard hang (and VLV,CHV may) on a looping
         * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
         *
         * TODO: verify if this can be reproduced on VLV,CHV.
         */
        if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
                mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

        if (INTEL_INFO(dev_priv)->gen >= 8)
                mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

        return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->rps.interrupts_enabled = false;
        spin_unlock_irq(&dev_priv->irq_lock);

        cancel_work_sync(&dev_priv->rps.work);

        spin_lock_irq(&dev_priv->irq_lock);

        I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

        __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
                   ~dev_priv->pm_rps_events);

        spin_unlock_irq(&dev_priv->irq_lock);

        synchronize_irq(dev->irq);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                  uint32_t interrupt_mask,
                                  uint32_t enabled_irq_mask)
{
        uint32_t sdeimr = I915_READ(SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                       u32 enable_mask, u32 status_mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        assert_spin_locked(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        if ((pipestat & enable_mask) == enable_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;

        /* Enable the interrupt, clear any pending status */
        pipestat |= enable_mask | status_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                        u32 enable_mask, u32 status_mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        assert_spin_locked(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        if ((pipestat & enable_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

        pipestat &= ~enable_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
        u32 enable_mask = status_mask << 16;

        /*
         * On pipe A we don't support the PSR interrupt yet,
         * on pipe B and C the same bit MBZ.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
                return 0;
        /*
         * On pipe B and C we don't support the PSR interrupt yet, on pipe
         * A the same bit is for perf counters which we don't use either.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
                return 0;

        enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                         SPRITE0_FLIP_DONE_INT_EN_VLV |
                         SPRITE1_FLIP_DONE_INT_EN_VLV);
        if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
        if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

        return enable_mask;
}

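/*
 * PIPESTAT packs the enable bits in the high half of the register and the
 * corresponding status bits in the low half, which is why the helpers
 * below can usually derive the enable mask with a simple
 * status_mask << 16; VLV is the exception handled above.
 */
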
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                     u32 status_mask)
{
        u32 enable_mask;

        if (IS_VALLEYVIEW(dev_priv->dev))
                enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
        __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                      u32 status_mask)
{
        u32 enable_mask;

        if (IS_VALLEYVIEW(dev_priv->dev))
                enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
        __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
                return;

        spin_lock_irq(&dev_priv->irq_lock);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);

        spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

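/*
 * Three frame counter flavours follow (editorial summary of the functions
 * below): gen2 has no hardware frame counter at all, gen3/4 only expose a
 * pixel counter that i915_get_vblank_counter() turns into a vblank count,
 * and ctg+ parts provide a dedicated PIPE_FRMCOUNT register read by
 * gm45_get_vblank_counter().
 */
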
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
        /* Gen2 doesn't have a hardware frame counter */
        return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
        const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vbl_start = mode->crtc_vblank_start;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vbl_start = DIV_ROUND_UP(vbl_start, 2);

        /* Convert to pixel count */
        vbl_start *= htotal;

        /* Start of vblank event occurs at start of hsync */
        vbl_start -= htotal - hsync_start;

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ(low_frame);
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;

        /*
         * The frame counter increments at beginning of active.
         * Cook up a vblank counter by also checking the pixel
         * counter against vblank start.
         */
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);

        return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const struct drm_display_mode *mode = &crtc->base.hwmode;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;

        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;

        if (IS_GEN2(dev))
                position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

        /*
         * See update_scanline_offset() for the details on the
         * scanline_offset adjustment.
         */
        return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                                    unsigned int flags, int *vpos, int *hpos,
                                    ktime_t *stime, ktime_t *etime)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        unsigned long irqflags;

        if (WARN_ON(!mode->crtc_clock)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                vbl_start = DIV_ROUND_UP(vbl_start, 2);
                vbl_end /= 2;
                vtotal /= 2;
        }

        ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
        if (stime)
                *stime = ktime_get();

        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = __intel_get_crtc_scanline(intel_crtc);
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;

                /*
                 * In interlaced modes, the pixel counter counts all pixels,
                 * so one field will have htotal more pixels. In order to avoid
                 * the reported position from jumping backwards when the pixel
                 * counter is beyond the length of the shorter field, just
                 * clamp the position to the length of the shorter field. This
                 * matches how the scanline counter based position works since
                 * the scanline counter doesn't count the two half lines.
                 */
                if (position >= vtotal)
                        position = vtotal - 1;

                /*
                 * Start of vblank interrupt is triggered at start of hsync,
                 * just prior to the first active line of vblank. However we
                 * consider lines to start at the leading edge of horizontal
                 * active. So, should we get here before we've crossed into
                 * the horizontal active of the first line in vblank, we would
                 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
                 * always add htotal-hsync_start to the current pixel position.
                 */
                position = (position + htotal - hsync_start) % vtotal;
        }

        /* Get optional system timestamp after query. */
        if (etime)
                *etime = ktime_get();

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        in_vbl = position >= vbl_start && position < vbl_end;

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;

        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                *vpos = position;
                *hpos = 0;
        } else {
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_IN_VBLANK;

        return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        unsigned long irqflags;
        int position;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        position = __intel_get_crtc_scanline(crtc);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        return position;
}

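/*
 * Vblank timestamps are not computed here; the DRM core helper called
 * below derives them from the scanout position, which it obtains through
 * the driver's get_scanout_position hook (i915_get_crtc_scanoutpos()
 * above), using the crtc's hwmode for the conversion.
 */
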
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                                     int *max_error,
                                     struct timeval *vblank_time,
                                     unsigned flags)
{
        struct drm_crtc *crtc;

        if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        if (!crtc->hwmode.crtc_clock) {
                DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     crtc,
                                                     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);

        return;
}

static void notify_ring(struct intel_engine_cs *ring)
{
        if (!intel_ring_initialized(ring))
                return;

        trace_i915_gem_request_notify(ring);

        wake_up_all(&ring->irq_queue);
}

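/*
 * The helpers below implement the VLV/CHV C0 residency scheme for RPS (a
 * summary of the logic as written, not authoritative hardware
 * documentation): on an up/down EI interrupt the render and media C0
 * counters are sampled and their combined busy time is compared against
 * the elapsed CZ timestamp time, scaled by the up/down threshold
 * percentage, to decide whether to synthesize a regular up/down threshold
 * event.
 */
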
static void vlv_c0_read(struct drm_i915_private *dev_priv,
                        struct intel_rps_ei *ei)
{
        ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
        ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
        ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
                         const struct intel_rps_ei *old,
                         const struct intel_rps_ei *now,
                         int threshold)
{
        u64 time, c0;

        if (old->cz_clock == 0)
                return false;

        time = now->cz_clock - old->cz_clock;
        time *= threshold * dev_priv->mem_freq;

        /* Workload can be split between render + media, e.g. SwapBuffers
         * being blitted in X after being rendered in mesa. To account for
         * this we need to combine both engines into our activity counter.
         */
        c0 = now->render_c0 - old->render_c0;
        c0 += now->media_c0 - old->media_c0;
        c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;

        return c0 >= time;
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
        vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
        dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        struct intel_rps_ei now;
        u32 events = 0;

        if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
                return 0;

        vlv_c0_read(dev_priv, &now);
        if (now.cz_clock == 0)
                return 0;

        if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
                if (!vlv_c0_above(dev_priv,
                                  &dev_priv->rps.down_ei, &now,
                                  dev_priv->rps.down_threshold))
                        events |= GEN6_PM_RP_DOWN_THRESHOLD;
                dev_priv->rps.down_ei = now;
        }

        if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
                if (vlv_c0_above(dev_priv,
                                 &dev_priv->rps.up_ei, &now,
                                 dev_priv->rps.up_threshold))
                        events |= GEN6_PM_RP_UP_THRESHOLD;
                dev_priv->rps.up_ei = now;
        }

        return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *ring;
        int i;

        for_each_ring(ring, dev_priv, i)
                if (ring->irq_refcount)
                        return true;

        return false;
}

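/*
 * gen6_pm_rps_work() below is the bottom half for the RPS interrupts: it
 * re-enables the PM IRQs it is about to service and then walks the
 * up/down threshold events to pick a new frequency. Note (an editorial
 * summary of the code) how consecutive events in the same direction
 * double 'adj' for faster convergence, how CHV is kept on even frequency
 * encodings, and how a client boost jumps straight to the soft maximum.
 */
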
static void gen6_pm_rps_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, rps.work);
        bool client_boost;
        int new_delay, adj, min, max;
        u32 pm_iir;

        spin_lock_irq(&dev_priv->irq_lock);
        /* Speed up work cancelation during disabling rps interrupts. */
        if (!dev_priv->rps.interrupts_enabled) {
                spin_unlock_irq(&dev_priv->irq_lock);
                return;
        }
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        client_boost = dev_priv->rps.client_boost;
        dev_priv->rps.client_boost = false;
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Make sure we didn't queue anything we're not going to process. */
        WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

        if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
                return;

        mutex_lock(&dev_priv->rps.hw_lock);

        pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

        adj = dev_priv->rps.last_adj;
        new_delay = dev_priv->rps.cur_freq;
        min = dev_priv->rps.min_freq_softlimit;
        max = dev_priv->rps.max_freq_softlimit;

        if (client_boost) {
                new_delay = dev_priv->rps.max_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
                if (new_delay < dev_priv->rps.efficient_freq - adj) {
                        new_delay = dev_priv->rps.efficient_freq;
                        adj = 0;
                }
        } else if (any_waiters(dev_priv)) {
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;
                else
                        new_delay = dev_priv->rps.min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
        } else { /* unknown event */
                adj = 0;
        }

        dev_priv->rps.last_adj = adj;

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        new_delay += adj;
        new_delay = clamp_t(int, new_delay, min, max);

        intel_set_rps(dev_priv->dev, new_delay);

        mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        uint32_t misccpctl;
        uint8_t slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->dev->struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (WARN_ON(!dev_priv->l3_parity.which_slice))
                goto out;

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                u32 reg;

                slice--;
                if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                reg = GEN7_L3CDERRST1 + (slice * 0x200);

                error_status = I915_READ(reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                POSTING_READ(reg);

                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;

                kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);

                DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                          slice, row, bank, subbank);

                kfree(parity_event[4]);
                kfree(parity_event[3]);
                kfree(parity_event[2]);
                kfree(parity_event[1]);
        }

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
        WARN_ON(dev_priv->l3_parity.which_slice);
        spin_lock_irq(&dev_priv->irq_lock);
        gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
        spin_unlock_irq(&dev_priv->irq_lock);

        mutex_unlock(&dev_priv->dev->struct_mutex);
}

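/*
 * The interrupt handler itself only records which L3 slice reported a
 * parity error and masks further parity interrupts; the uevent and the L3
 * register access (which needs DOP clock gating disabled and struct mutex
 * held) happen in ivybridge_parity_work() above.
 */
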
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!HAS_L3_DPF(dev))
                return;

        spin_lock(&dev_priv->irq_lock);
        gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
        spin_unlock(&dev_priv->irq_lock);

        iir &= GT_PARITY_ERROR(dev);
        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
                dev_priv->l3_parity.which_slice |= 1 << 1;

        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                dev_priv->l3_parity.which_slice |= 1 << 0;

        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(&dev_priv->ring[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(&dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(&dev_priv->ring[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(&dev_priv->ring[VCS]);
        if (gt_iir & GT_BLT_USER_INTERRUPT)
                notify_ring(&dev_priv->ring[BCS]);

        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
                DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

        if (gt_iir & GT_PARITY_ERROR(dev))
                ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
                                       u32 master_ctl)
{
        irqreturn_t ret = IRQ_NONE;

        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
                u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
                if (tmp) {
                        I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
                        ret = IRQ_HANDLED;

                        if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
                                intel_lrc_irq_handler(&dev_priv->ring[RCS]);
                        if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
                                notify_ring(&dev_priv->ring[RCS]);

                        if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
                                intel_lrc_irq_handler(&dev_priv->ring[BCS]);
                        if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
                                notify_ring(&dev_priv->ring[BCS]);
                } else
                        DRM_ERROR("The master control interrupt lied (GT0)!\n");
        }

        if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
                u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
                if (tmp) {
                        I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
                        ret = IRQ_HANDLED;

                        if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
                                intel_lrc_irq_handler(&dev_priv->ring[VCS]);
                        if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
                                notify_ring(&dev_priv->ring[VCS]);

                        if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
                                intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
                        if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
                                notify_ring(&dev_priv->ring[VCS2]);
                } else
                        DRM_ERROR("The master control interrupt lied (GT1)!\n");
        }

        if (master_ctl & GEN8_GT_VECS_IRQ) {
                u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
                if (tmp) {
                        I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
                        ret = IRQ_HANDLED;

                        if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
                                intel_lrc_irq_handler(&dev_priv->ring[VECS]);
                        if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
                                notify_ring(&dev_priv->ring[VECS]);
                } else
                        DRM_ERROR("The master control interrupt lied (GT3)!\n");
        }

        if (master_ctl & GEN8_GT_PM_IRQ) {
                u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
                if (tmp & dev_priv->pm_rps_events) {
                        I915_WRITE_FW(GEN8_GT_IIR(2),
                                      tmp & dev_priv->pm_rps_events);
                        ret = IRQ_HANDLED;
                        gen6_rps_irq_handler(dev_priv, tmp);
                } else
                        DRM_ERROR("The master control interrupt lied (PM)!\n");
        }

        return ret;
}

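/*
 * The *_long_detect() helpers below decode the per-port pulse duration
 * bits: a long pulse indicates a real plug/unplug, while a short pulse is
 * the sink requesting attention (e.g. a DP sink IRQ), which is why the
 * callers report both a triggered pin mask and a separate long mask.
 */
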
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
        switch (port) {
        case PORT_A:
                return val & PORTA_HOTPLUG_LONG_DETECT;
        case PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        case PORT_D:
                return val & PORTD_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
        switch (port) {
        case PORT_E:
                return val & PORTE_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
        switch (port) {
        case PORT_A:
                return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
        switch (port) {
        case PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        case PORT_D:
                return val & PORTD_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
        switch (port) {
        case PORT_B:
                return val & PORTB_HOTPLUG_INT_LONG_PULSE;
        case PORT_C:
                return val & PORTC_HOTPLUG_INT_LONG_PULSE;
        case PORT_D:
                return val & PORTD_HOTPLUG_INT_LONG_PULSE;
        default:
                return false;
        }
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
                               u32 hotplug_trigger, u32 dig_hotplug_reg,
                               const u32 hpd[HPD_NUM_PINS],
                               bool long_pulse_detect(enum port port, u32 val))
{
        enum port port;
        int i;

        for_each_hpd_pin(i) {
                if ((hpd[i] & hotplug_trigger) == 0)
                        continue;

                *pin_mask |= BIT(i);

                if (!intel_hpd_pin_to_port(i, &port))
                        continue;

                if (long_pulse_detect(port, dig_hotplug_reg))
                        *long_mask |= BIT(i);
        }

        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
                         hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
                                         uint32_t crc0, uint32_t crc1,
                                         uint32_t crc2, uint32_t crc3,
                                         uint32_t crc4)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
        struct intel_pipe_crc_entry *entry;
        int head, tail;

        spin_lock(&pipe_crc->lock);

        if (!pipe_crc->entries) {
                spin_unlock(&pipe_crc->lock);
                DRM_DEBUG_KMS("spurious interrupt\n");
                return;
        }

        head = pipe_crc->head;
        tail = pipe_crc->tail;

        if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
                spin_unlock(&pipe_crc->lock);
                DRM_ERROR("CRC buffer overflowing\n");
                return;
        }

        entry = &pipe_crc->entries[head];

        entry->frame = dev->driver->get_vblank_counter(dev, pipe);
        entry->crc[0] = crc0;
        entry->crc[1] = crc1;
        entry->crc[2] = crc2;
        entry->crc[3] = crc3;
        entry->crc[4] = crc4;

        head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
        pipe_crc->head = head;

        spin_unlock(&pipe_crc->lock);

        wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
                             uint32_t crc0, uint32_t crc1,
                             uint32_t crc2, uint32_t crc3,
                             uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        display_pipe_crc_irq_handler(dev, pipe,
                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
                                     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        display_pipe_crc_irq_handler(dev, pipe,
                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t res1, res2;

        if (INTEL_INFO(dev)->gen >= 3)
                res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
        else
                res1 = 0;

        if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
                res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
        else
                res2 = 0;

        display_pipe_crc_irq_handler(dev, pipe,
                                     I915_READ(PIPE_CRC_RES_RED(pipe)),
                                     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
                                     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
                                     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        if (pm_iir & dev_priv->pm_rps_events) {
                spin_lock(&dev_priv->irq_lock);
                gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
                if (dev_priv->rps.interrupts_enabled) {
                        dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
                        queue_work(dev_priv->wq, &dev_priv->rps.work);
                }
                spin_unlock(&dev_priv->irq_lock);
        }

        if (INTEL_INFO(dev_priv)->gen >= 8)
                return;

        if (HAS_VEBOX(dev_priv->dev)) {
                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
                        notify_ring(&dev_priv->ring[VECS]);

                if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
                        DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
        }
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
        if (!drm_handle_vblank(dev, pipe))
                return false;

        return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pipe_stats[I915_MAX_PIPES] = { };
        int pipe;

        spin_lock(&dev_priv->irq_lock);
        for_each_pipe(dev_priv, pipe) {
                int reg;
                u32 mask, iir_bit = 0;

                /*
                 * PIPESTAT bits get signalled even when the interrupt is
                 * disabled with the mask bits, and some of the status bits do
                 * not generate interrupts at all (like the underrun bit). Hence
                 * we need to be careful that we only handle what we want to
                 * handle.
                 */

                /* fifo underruns are filtered in the underrun handler. */
                mask = PIPE_FIFO_UNDERRUN_STATUS;

                switch (pipe) {
                case PIPE_A:
                        iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
                        break;
                case PIPE_B:
                        iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
                        break;
                case PIPE_C:
                        iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
                        break;
                }
                if (iir & iir_bit)
                        mask |= dev_priv->pipestat_irq_mask[pipe];

                if (!mask)
                        continue;

                reg = PIPESTAT(pipe);
                mask |= PIPESTAT_INT_ENABLE_MASK;
                pipe_stats[pipe] = I915_READ(reg) & mask;

                /*
                 * Clear the PIPE*STAT regs before the IIR
                 */
                if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
                                        PIPESTAT_INT_STATUS_MASK))
                        I915_WRITE(reg, pipe_stats[pipe]);
        }
        spin_unlock(&dev_priv->irq_lock);

        for_each_pipe(dev_priv, pipe) {
                if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
                    intel_pipe_handle_vblank(dev, pipe))
                        intel_check_page_flip(dev, pipe);

                if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
                        intel_prepare_page_flip(dev, pipe);
                        intel_finish_page_flip(dev, pipe);
                }

                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                        i9xx_pipe_crc_irq_handler(dev, pipe);

                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
        }

        if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
	u32 pin_mask = 0, long_mask = 0;

	if (!hotplug_status)
		return;

	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	/*
	 * Make sure hotplug status is cleared before we clear IIR, or else we
	 * may miss hotplug events.
	 */
	POSTING_READ(PORT_HOTPLUG_STAT);

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   hotplug_trigger, hpd_status_g4x,
				   i9xx_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   hotplug_trigger, hpd_status_i915,
				   i9xx_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
	}
}

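/*
 * VLV top-level handler: it loops until GTIIR, GEN6_PMIIR and VLV_IIR all
 * read back zero, so interrupt sources that assert while an earlier pass
 * is being serviced are picked up on the next iteration.
 */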
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}

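/*
 * CHV routes GT interrupts through the gen8-style master control and
 * per-engine IIRs, while display events still arrive via VLV_IIR, so both
 * have to be polled here.
 */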
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_ibx,
				   pch_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
	}

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask,
				   hotplug_trigger,
				   dig_hotplug_reg, hpd_cpt,
				   pch_port_hotplug_long_detect);

		intel_hpd_irq_handler(dev, pin_mask, long_mask);
	}

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

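/*
 * SPT moves port E hotplug into a second register (PCH_PORT_HOTPLUG2) with
 * its own trigger bit, so long/short detection runs twice here before the
 * accumulated pin masks are handed to intel_hpd_irq_handler().
 */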
static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   pch_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

		dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
		I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_ilk,
				   ilk_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
	}

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

		dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
		I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_ivb,
				   ilk_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
	}

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

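/*
 * On BXT hotplug arrives through the DE port interrupt rather than the PCH;
 * BXT_HOTPLUG_CTL holds both the per-port enables and the sticky long/short
 * status bits, which are cleared by writing them back.
 */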
static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hp_control, hp_trigger;
	u32 pin_mask = 0, long_mask = 0;

	/* Get the status */
	hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
	hp_control = I915_READ(BXT_HOTPLUG_CTL);

	/* Hotplug not enabled? */
	if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
		DRM_ERROR("Interrupt when HPD disabled\n");
		return;
	}

	/* Clear sticky bits in hpd status */
	I915_WRITE(BXT_HOTPLUG_CTL, hp_control);

	intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
			   hpd_bxt, bxt_port_hotplug_long_detect);
	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

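/*
 * The raw _FW register accessors are used for GEN8_MASTER_IRQ in the hot
 * path below; this assumes (as elsewhere in the driver) that the master IRQ
 * register needs neither forcewake nor the uncore lock.
 */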
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;
	u32 aux_mask = GEN8_AUX_CHANNEL_A;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	if (IS_GEN9(dev))
		aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* Find, clear, then process each source of interrupt */

	ret = gen8_gt_irq_handler(dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
			if (tmp & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp) {
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;

			if (tmp & aux_mask) {
				dp_aux_irq_handler(dev);
				found = true;
			}

			if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) {
				bxt_hpd_handler(dev, tmp);
				found = true;
			}

			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);

			if (pipe_iir & GEN8_PIPE_VBLANK &&
			    intel_pipe_handle_vblank(dev, pipe))
				intel_check_page_flip(dev, pipe);

			if (IS_GEN9(dev))
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
			else
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;

			if (flip_done) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip_plane(dev, pipe);
			}

			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
				hsw_pipe_crc_irq_handler(dev, pipe);

			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);

			if (IS_GEN9(dev))
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
			else
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

			if (fault_errors)
				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
					  pipe_name(pipe),
					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);
		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;

			if (HAS_PCH_SPT(dev_priv))
				spt_irq_handler(dev, pch_iir);
			else
				cpt_irq_handler(dev, pch_iir);
		} else
			DRM_ERROR("The master control interrupt lied (SDE)!\n");
	}

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

	return ret;
}

static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

/**
 * i915_reset_and_wakeup - do process context error handling work
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_reset_and_wakeup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);

		intel_prepare_reset(dev);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_finish_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle a gpu error
 * @dev: drm device
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset function
		 * i915_reset_and_wakeup doesn't deadlock trying to grab
		 * various locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	i915_reset_and_wakeup(dev);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring->last_submitted_seqno));
}

static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
	if (INTEL_INFO(dev)->gen >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);

	return NULL;
}

static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}
	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}

static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}

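/*
 * Classify a busy ring that has stopped making seqno progress: ACTHD still
 * moving means the ring is active, a kickable WAIT_FOR_EVENT or semaphore
 * wait gets poked via the ring control register, anything else is hung.
 */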
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}

/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck, in which case we
 * kick it. If we see no progress on three subsequent calls we assume the
 * chip is wedged and try to fix it by resetting it.
 */
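/*
 * Scoring (see the constants below): a busy ring gains BUSY per tick
 * without progress, KICK when a stuck wait had to be kicked and HUNG when
 * no kick is possible; a score of HANGCHECK_SCORE_RING_HUNG or more
 * triggers the GPU reset path via i915_handle_error().
 */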
static void i915_hangcheck_elapsed(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     gpu_error.hangcheck_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20

	if (!i915.enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u64 acthd;
		u32 seqno;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
				case HANGCHECK_ACTIVE:
					break;
				case HANGCHECK_ACTIVE_LOOP:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;

			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true, "Ring hung");

	if (busy_count)
		/* Reset timer in case the chip hangs without another request
		 * being added */
		i915_queue_hangcheck(dev);
}

void i915_queue_hangcheck(struct drm_device *dev)
{
	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;

	if (!i915.enable_hangcheck)
		return;

	/* Don't continually defer the hangcheck so that it is always run at
	 * least once after work has been scheduled on any ring. Otherwise,
	 * we will ignore a hung ring if a second ring is kept busy.
	 */
	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
}

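/*
 * The GEN5_IRQ_RESET()/GEN8_IRQ_RESET_NDX() helpers used below are defined
 * earlier in this file; roughly, they mask everything in IMR, zero IER and
 * double-clear IIR to catch status bits that get re-latched.
 */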
static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}

/* drm_dma.h hooks */
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}

static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	GEN5_IRQ_RESET(VLV_);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	vlv_display_irq_reset(dev_priv);
}

static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_reset(dev);
}

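/*
 * The pipe interrupt registers live in the per-pipe power wells on gen8+,
 * so their IMR/IER contents are lost while a well is down and have to be
 * re-initialized here once the well comes back up.
 */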
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;

	spin_lock_irq(&dev_priv->irq_lock);
	if (pipe_mask & 1 << PIPE_A)
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
				  dev_priv->de_irq_mask[PIPE_A],
				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
	if (pipe_mask & 1 << PIPE_B)
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
				  dev_priv->de_irq_mask[PIPE_B],
				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
	if (pipe_mask & 1 << PIPE_C)
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
				  dev_priv->de_irq_mask[PIPE_C],
				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

	vlv_display_irq_reset(dev_priv);
}

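/*
 * Collect the trigger bits for every encoder whose HPD pin is currently
 * enabled; the per-platform hpd_irq_setup hooks below use this to build
 * the unmask value.
 */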
static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(dev, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

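/*
 * SPT variant: port E's hotplug enable lives in PCH_PORT_HOTPLUG2 and,
 * unlike IBX/CPT, no pulse duration needs programming, hence a separate
 * setup hook instead of reusing ibx_hpd_irq_setup().
 */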
static void spt_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
		PORTB_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (INTEL_INFO(dev)->gen >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
	}

	ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);

	ibx_hpd_irq_setup(dev);
}

static void bxt_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_port;
	u32 hotplug_ctrl;

	hotplug_port = intel_hpd_enabled_irqs(dev, hpd_bxt);

	hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;

	if (hotplug_port & BXT_DE_PORT_HP_DDIA)
		hotplug_ctrl |= BXT_DDIA_HPD_ENABLE;
	if (hotplug_port & BXT_DE_PORT_HP_DDIB)
		hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
	if (hotplug_port & BXT_DE_PORT_HP_DDIC)
		hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
	I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);

	hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
	I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);

	hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
	I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
	POSTING_READ(GEN8_DE_PORT_IER);
}

d46da437
PZ
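/*
 * Unmask the always-wanted south display engine interrupts (GMBUS and
 * AUX, plus the poison notification on IBX); PCH_NOP systems have no
 * south display engine to set up.
 */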
static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

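/*
 * GT and PM interrupt setup shared by gen5-gen7. GEN5_IRQ_INIT() is a
 * macro defined earlier in this file; roughly (an assumption, see the
 * macro for the authoritative sequence) it asserts that IIR is already
 * zero and then programs the given IER/IMR pair with a posting read.
 */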
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}

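/*
 * Note the display_mask/extra_mask split below: both are enabled in IER,
 * but only display_mask is unmasked in IMR. The extra_mask bits (vblank,
 * underrun, port A HPD) start out masked and are unmasked on demand.
 */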
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}

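/*
 * VLV/CHV display interrupt install/uninstall. VLV_IIR is deliberately
 * written twice: the assumption (matching the double clears elsewhere in
 * this file) is that IIR can latch a second event while the first is
 * being cleared, so a single write may not leave it empty.
 */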
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);
}

static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_install(dev_priv);
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_uninstall(dev_priv);
}

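/*
 * The postinstall below starts from an all-masked state (irq_mask = ~0)
 * and only installs the display interrupts, under irq_lock, if they were
 * left logically enabled.
 */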
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	dev_priv->irq_mask = ~0;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

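/*
 * gt_interrupts[] below is indexed by the four gen8 GT interrupt
 * register banks: 0 = RCS/BCS, 1 = VCS1/VCS2, 2 = PM (left fully masked
 * here; RPS enables it on demand), 3 = VECS.
 */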
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

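/*
 * Pipe interrupts are only set up for pipes whose power well is enabled
 * at this point; powered-down pipes are expected to get theirs restored
 * by the power well code when they come back up (assumption: that code
 * lives elsewhere in the driver).
 */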
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	int pipe;
	u32 de_port_en = GEN8_AUX_CHANNEL_A;

	if (IS_GEN9(dev_priv)) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			      GEN9_AUX_CHANNEL_D;

		if (IS_BROXTON(dev_priv))
			de_port_en |= BXT_DE_PORT_GMBUS;
	} else
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
{
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_display_irq_reset(dev_priv);

	dev_priv->irq_mask = ~0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	vlv_display_irq_uninstall(dev_priv);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	vlv_display_irq_uninstall(dev_priv);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

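/*
 * Gen2 (i8xx) support starts here. Note the 16-bit accessors: on these
 * parts IMR/IER/IIR are only 16 bits wide.
 */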
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

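/*
 * Note that @plane and @pipe can differ below: with FBC enabled the
 * callers swap the primary plane/pipe association (plane = !pipe).
 */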
/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

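/*
 * flip_mask keeps the plane flip-pending bits out of both the loop
 * condition and the IIR clear until i8xx_handle_vblank() reports the
 * flip complete, so a pending flip can neither end the loop early nor
 * be acked and lost.
 */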
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

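/*
 * Gen3 (i915-class) support: 32-bit interrupt registers again, plus port
 * hotplug on the parts where I915_HAS_HOTPLUG() is true.
 */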
static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

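/*
 * Gen4 (i965/G4X) support: adds the BSD ring user interrupt on G4X and
 * GMBUS handling on top of the gen3 scheme.
 */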
static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

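/*
 * Used by gen3/gen4 and, via the hpd_irq_setup hook, by VLV/CHV too;
 * must be called with irq_lock held (see the assert below).
 */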
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	hotplug_en &= ~HOTPLUG_INT_EN_MASK;
	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en |= intel_hpd_enabled_irqs(dev, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do
	 * it once.
	 */
	if (IS_G4X(dev))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

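	/*
	 * Pick the per-platform irq vtable: VLV and CHV share the display
	 * side (including i915_hpd_irq_setup) but CHV uses the gen8-style
	 * GT/master interrupt scheme.
	 */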
	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the
 * hotplug handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}

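/*
 * Typical call order during driver load, as a sketch; this is an
 * assumption, the authoritative sequence lives in the load code outside
 * this file:
 *
 *	intel_irq_init(dev_priv);     // set up vtables and work items
 *	intel_irq_install(dev_priv);  // request the interrupt
 *	...                           // modeset init
 *	intel_hpd_init(dev_priv);     // then enable hotplug handling
 */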
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->dev->irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}