/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
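
/*
 * Illustrative sketch (not code from this file): the 'type' token is pasted
 * onto the IIR/IER/IMR register names, so a hypothetical
 * GEN5_IRQ_INIT(DE, imr, ier) would expand roughly to:
 *
 *	GEN5_ASSERT_IIR_IS_ZERO(DEIIR);
 *	I915_WRITE(DEIER, (ier));
 *	I915_WRITE(DEIMR, (imr));
 *	POSTING_READ(DEIMR);
 *
 * i.e. IIR is verified clear first, IER is armed, and the final IMR write
 * is posted so the unmask has reached the hardware before the caller
 * continues.
 */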

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}
/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid having read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
				   uint32_t interrupt_mask,
				   uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_display_irq(dev_priv, mask, mask);
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_display_irq(dev_priv, mask, 0);
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB can hard hang on a looping batchbuffer (and VLV,CHV
	 * may as well) if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev->irq);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       . \hs/ .      \hs/          \hs/          \hs/ .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

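/*
 * Worked example of the cooked counter above (made-up numbers, not from
 * any real mode): with htotal = 100, hsync_start = 90 and
 * vblank_start = 80 lines, the start-of-vblank event sits at pixel
 * 80 * 100 - (100 - 90) = 7990. The hardware frame counter only
 * increments at the start of active, so if the pixel counter already
 * reads >= 7990 we are past the start of vblank and add 1, keeping the
 * cooked value in step with a counter that increments at vblank start.
 */
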
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (IS_HASWELL(dev) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix
		 * that, always add htotal-hsync_start to the current pixel
		 * position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

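/*
 * Worked example for the normalization above (made-up numbers): with
 * vbl_start = 80, vbl_end = 83 and vtotal = 83 (in lines), a scanline
 * position of 81 is inside vblank and becomes 81 - 83 = -2, counting up
 * towards 0 at vbl_end; a position of 10 is outside vblank and becomes
 * 10 + (83 - 83) = 10, i.e. the number of lines elapsed since vbl_end.
 */
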
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;

	if (old->cz_clock == 0)
		return false;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->mem_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;

	return c0 >= time;
}

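/*
 * Unit sketch for the comparison above (an informal reading of the code,
 * not taken from any spec): both sides are scaled into the same arbitrary
 * units. 'time' is the elapsed CZ timestamp delta scaled by the threshold
 * (in percent) and the memory frequency, while 'c0' is the combined
 * render+media residency delta scaled by 100 (to match the percent factor)
 * and the CZ-clock-to-millisecond conversion. With that, 'c0 >= time'
 * reads as: the GPU was busy for at least 'threshold' percent of the
 * evaluation interval.
 */
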
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		if (ring->irq_refcount)
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

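/*
 * Illustrative walk-through of the adjustment logic above (made-up
 * numbers): starting at cur_freq = 10 with last_adj = 0, a run of
 * consecutive GEN6_PM_RP_UP_THRESHOLD events produces adj = 1, 2, 4, ...
 * (or 2, 4, 8, ... on CHV, which needs even encode values), so sustained
 * load ramps the frequency progressively faster, while the final
 * clamp_t() keeps new_delay inside the min/max softlimits.
 */
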
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[RCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[BCS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS2]);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VECS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

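/*
 * Illustrative sketch of the accumulation contract above (hypothetical
 * caller, not code from this file): a PCH with two hotplug registers
 * would zero the masks once and then accumulate across both reads, e.g.
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(&pin_mask, &long_mask, trigger1, dig_reg1,
 *			   hpd_spt, spt_port_hotplug_long_detect);
 *	intel_get_hpd_pins(&pin_mask, &long_mask, trigger2, dig_reg2,
 *			   hpd_spt, spt_port_hotplug2_long_detect);
 *	intel_hpd_irq_handler(dev, pin_mask, long_mask);
 */
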
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

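/*
 * Note on the ring arithmetic above (a reading of the code, not extra
 * behaviour): INTEL_PIPE_CRC_ENTRIES_NR must be a power of two for the
 * '& (INTEL_PIPE_CRC_ENTRIES_NR - 1)' wrap to work, and CIRC_SPACE()
 * (from <linux/circ_buf.h>) reports how many entries the producer may
 * still write before catching up with the consumer's tail; one slot is
 * always left empty to distinguish a full ring from an empty one.
 */
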
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

16c6c56b
VS
1686static void i9xx_hpd_irq_handler(struct drm_device *dev)
1687{
1688 struct drm_i915_private *dev_priv = dev->dev_private;
1689 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
42db67d6 1690 u32 pin_mask = 0, long_mask = 0;
16c6c56b 1691
0d2e4297
JN
1692 if (!hotplug_status)
1693 return;
16c6c56b 1694
0d2e4297
JN
1695 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1696 /*
1697 * Make sure hotplug status is cleared before we clear IIR, or else we
1698 * may miss hotplug events.
1699 */
1700 POSTING_READ(PORT_HOTPLUG_STAT);
16c6c56b 1701
0d2e4297
JN
1702 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
1703 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
16c6c56b 1704
58f2cf24
VS
1705 if (hotplug_trigger) {
1706 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1707 hotplug_trigger, hpd_status_g4x,
1708 i9xx_port_hotplug_long_detect);
1709
1710 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1711 }
369712e8
JN
1712
1713 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1714 dp_aux_irq_handler(dev);
0d2e4297
JN
1715 } else {
1716 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
16c6c56b 1717
58f2cf24
VS
1718 if (hotplug_trigger) {
1719 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
44cc6c08 1720 hotplug_trigger, hpd_status_i915,
58f2cf24 1721 i9xx_port_hotplug_long_detect);
58f2cf24
VS
1722 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1723 }
3ff60f89 1724 }
16c6c56b
VS
1725}
1726
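/*
 * VLV has no single master interrupt control, so the handler loops until
 * one pass reads all three IIR sources (GT, PM, display) as zero.
 */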
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}

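/*
 * CHV routes everything through GEN8_MASTER_IRQ: it is zeroed for the
 * duration of each loop iteration and re-armed (with a posting read)
 * once the sources found in that pass have been processed.
 */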
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}

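/*
 * Reading PCH_PORT_HOTPLUG and writing the value straight back acks the
 * latched hotplug pulse status; the raw value is still needed afterwards
 * to tell long pulses from short ones.
 */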
static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger)
		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger)
		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

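/*
 * SPT splits the hotplug status across two registers: ports A-D live in
 * PCH_PORT_HOTPLUG, while port E has its own bits in PCH_PORT_HOTPLUG2,
 * hence the two separate trigger masks below.
 */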
static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);
}

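/*
 * CPU-side (north display) digital port hotplug uses
 * DIGITAL_PORT_HOTPLUG_CNTRL rather than the south PCH_PORT_HOTPLUG
 * register, but follows the same read/write-back ack pattern.
 */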
static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clearing CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clearing CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be able
	 * to process them after we restore SDEIER (as soon as we restore it,
	 * we'll get an interrupt if SDEIIR still has something to process due
	 * to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

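/*
 * The _FW register accessors below deliberately skip the forcewake
 * bookkeeping; GEN8_MASTER_IRQ is assumed to be accessible without a
 * forcewake reference, which keeps the hot irq path cheap.
 */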
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;
	u32 aux_mask = GEN8_AUX_CHANNEL_A;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	if (INTEL_INFO(dev_priv)->gen >= 9)
		aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* Find, clear, then process each source of interrupt */

	ret = gen8_gt_irq_handler(dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
			if (tmp & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp) {
			bool found = false;
			u32 hotplug_trigger = 0;

			if (IS_BROXTON(dev_priv))
				hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
			else if (IS_BROADWELL(dev_priv))
				hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;

			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;

			if (tmp & aux_mask) {
				dp_aux_irq_handler(dev);
				found = true;
			}

			if (hotplug_trigger) {
				if (IS_BROXTON(dev))
					bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
				else
					ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
				found = true;
			}

			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);

			if (pipe_iir & GEN8_PIPE_VBLANK &&
			    intel_pipe_handle_vblank(dev, pipe))
				intel_check_page_flip(dev, pipe);

			if (INTEL_INFO(dev_priv)->gen >= 9)
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
			else
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;

			if (flip_done) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip_plane(dev, pipe);
			}

			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
				hsw_pipe_crc_irq_handler(dev, pipe);

			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);

			if (INTEL_INFO(dev_priv)->gen >= 9)
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
			else
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

			if (fault_errors)
				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
					  pipe_name(pipe),
					  fault_errors);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);
		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;

			if (HAS_PCH_SPT(dev_priv))
				spt_irq_handler(dev, pch_iir);
			else
				cpt_irq_handler(dev, pch_iir);
		} else
			DRM_ERROR("The master control interrupt lied (SDE)!\n");
	}

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

	return ret;
}

static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

/**
 * i915_reset_and_wakeup - do process context error handling work
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_reset_and_wakeup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);

		intel_prepare_reset(dev);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_finish_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_or(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle a gpu error
 * @dev: drm device
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
			  &dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset function
		 * i915_reset_and_wakeup doesn't deadlock trying to grab
		 * various locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	i915_reset_and_wakeup(dev);
}

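/*
 * Two flavours of vblank status bits exist on the i9xx path: gen4+ can
 * use the "start of vblank" interrupt, while older parts only have the
 * coarser vblank status, hence the gen check in i915_enable_vblank()
 * below.
 */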
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

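/*
 * The semaphore helpers below implement hang triage for ring-to-ring
 * waits: given a ring stuck on a semaphore, scan its ringbuffer backwards
 * from HEAD to recover the wait command, map it to the signalling ring,
 * and check whether the awaited seqno has in fact passed.
 */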
static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring->last_submitted_seqno));
}

static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
	if (INTEL_INFO(dev)->gen >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);

	return NULL;
}

static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		/* Assemble the 64-bit semaphore offset: high dword first,
		 * then OR in the low dword. */
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}
	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}

static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}

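/*
 * Hangcheck scoring: a busy ring gains BUSY per tick it makes no seqno
 * progress, KICK when we had to kick a stuck wait, and HUNG when even
 * kicking didn't help; crossing HANGCHECK_SCORE_RING_HUNG (defined
 * elsewhere) is what finally declares the ring hung.
 */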
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck; if it is, we
 * kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     gpu_error.hangcheck_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20

	if (!i915.enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u64 acthd;
		u32 seqno;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
				case HANGCHECK_ACTIVE:
					break;
				case HANGCHECK_ACTIVE_LOOP:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;

			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true, "Ring hung");

	if (busy_count)
		/* Reset the timer in case the chip hangs without another
		 * request being added */
		i915_queue_hangcheck(dev);
}

void i915_queue_hangcheck(struct drm_device *dev)
{
	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;

	if (!i915.enable_hangcheck)
		return;

	/* Don't continually defer the hangcheck so that it is always run at
	 * least once after work has been scheduled on any ring. Otherwise,
	 * we will ignore a hung ring if a second ring is kept busy.
	 */

	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
}

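/*
 * The GEN5_IRQ_RESET() helper used below is assumed to mask everything in
 * IMR, zero IER, and flush any stale bits left in IIR for the given
 * register block, leaving the block fully quiesced.
 */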
static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}

/* drm_dma.h hooks */
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}

static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	GEN5_IRQ_RESET(VLV_);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	vlv_display_irq_reset(dev_priv);
}

static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_reset(dev);
}

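/*
 * gen8_irq_reset() above skips pipes whose power wells are off, so when a
 * power well comes back on the pipe interrupt registers must be
 * re-initialized; that is what the post-enable hook below does for the
 * pipes named in pipe_mask.
 */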
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;

	spin_lock_irq(&dev_priv->irq_lock);
	if (pipe_mask & 1 << PIPE_A)
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
				  dev_priv->de_irq_mask[PIPE_A],
				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
	if (pipe_mask & 1 << PIPE_B)
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
				  dev_priv->de_irq_mask[PIPE_B],
				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
	if (pipe_mask & 1 << PIPE_C)
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
				  dev_priv->de_irq_mask[PIPE_C],
				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

	vlv_display_irq_reset(dev_priv);
}

3208
87a02106
VS
3209static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3210 const u32 hpd[HPD_NUM_PINS])
3211{
3212 struct drm_i915_private *dev_priv = to_i915(dev);
3213 struct intel_encoder *encoder;
3214 u32 enabled_irqs = 0;
3215
3216 for_each_intel_encoder(dev, encoder)
3217 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3218 enabled_irqs |= hpd[encoder->hpd_pin];
3219
3220 return enabled_irqs;
3221}
3222
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void spt_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (INTEL_INFO(dev)->gen >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);

	ibx_hpd_irq_setup(dev);
}

static void bxt_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
		PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/*
		 * Enable PCU event interrupts.
		 *
		 * Spinlocking is not required here for correctness since
		 * interrupt setup is guaranteed to run in single-threaded
		 * context. But we need it to make the assert_spin_locked
		 * check happy.
		 */
		spin_lock_irq(&dev_priv->irq_lock);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}

static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	/* VLV_IIR is written twice on purpose to make sure the
	 * double-buffered status bits really clear before enabling. */
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);
}

static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_install(dev_priv);
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_uninstall(dev_priv);
}

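/*
 * Editor's sketch (illustrative only, not part of the original file): the two
 * helpers above assert_spin_locked(), so any caller is expected to take
 * irq_lock around them. The wrapper name is hypothetical.
 */
#if 0
static void example_set_display_irqs(struct drm_i915_private *dev_priv,
				     bool enable)
{
	spin_lock_irq(&dev_priv->irq_lock);
	if (enable)
		valleyview_enable_display_irqs(dev_priv);
	else
		valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
#endif
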
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	dev_priv->irq_mask = ~0;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	enum pipe pipe;

	if (INTEL_INFO(dev_priv)->gen >= 9) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_BROXTON(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
			  GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
{
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_display_irq_reset(dev_priv);

	dev_priv->irq_mask = ~0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	vlv_display_irq_uninstall(dev_priv);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	vlv_display_irq_uninstall(dev_priv);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

static void i8xx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	return ret;
}

static void i915_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do
	 * it once.
	 */
	if (IS_G4X(dev))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     (HOTPLUG_INT_EN_MASK |
					      CRT_HOTPLUG_VOLTAGE_COMPARE_MASK),
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}

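/*
 * Editor's sketch (illustrative only, not part of the original file): the
 * two-stage bring-up described in the kerneldoc above, assuming a simplified
 * driver-load path. The function name is hypothetical and error handling for
 * the surrounding load sequence is omitted.
 */
#if 0
static int example_irq_bringup(struct drm_i915_private *dev_priv)
{
	intel_irq_init(dev_priv);		/* vtables and work items only */
	return intel_irq_install(dev_priv);	/* actually requests the IRQ */
}
#endif
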
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->dev->irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
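
/*
 * Editor's sketch (illustrative only, not part of the original file): how the
 * two runtime-pm helpers above pair up in a suspend/resume path. The callback
 * names are hypothetical.
 */
#if 0
static int example_runtime_suspend(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable_interrupts(dev_priv);
	/* ... put the device into a low-power state ... */
	return 0;
}

static int example_runtime_resume(struct drm_i915_private *dev_priv)
{
	/* ... bring the device back up ... */
	intel_runtime_pm_enable_interrupts(dev_priv);
	return 0;
}
#endif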