drm/i915/gen8: Tidy display interrupt processing
drivers/gpu/drm/i915/i915_irq.c
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

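/*
 * Note the double IIR clear in the reset macros above: IIR can latch a
 * second event while the first status bit is still set, so a single clear
 * could leave that second event pending with no edge left to retrigger
 * the line. As a worked example of the expansion, GEN5_IRQ_RESET(GT)
 * masks all interrupts in GTIMR, zeroes GTIER, and then clears GTIIR
 * twice, with posting reads in between to flush each write to hardware.
 */
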
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. So that the read-modify-write cycles
 * don't interfere with each other, these bits are protected by a
 * spinlock. Since this function is usually not called from a
 * context where the lock is held already, this function acquires
 * the lock itself. A non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

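/*
 * Usage sketch (illustrative, not an actual call site in this file): to
 * enable hot-plug detection on port B only, a caller would pass the same
 * bit in both arguments, and pass 0 in @bits to disable it again:
 *
 *	i915_hotplug_interrupt_update(dev_priv, PORTB_HOTPLUG_INT_EN,
 *				      PORTB_HOTPLUG_INT_EN);
 *	i915_hotplug_interrupt_update(dev_priv, PORTB_HOTPLUG_INT_EN, 0);
 *
 * Bits outside @mask are left untouched by the read-modify-write.
 */
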
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

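/*
 * Note the polarity convention shared by all the *_update_*_irq()
 * helpers in this file: IMR is a *mask* register, so a set bit disables
 * an interrupt. The two-step update
 *
 *	new_val &= ~interrupt_mask;
 *	new_val |= (~enabled_irq_mask & interrupt_mask);
 *
 * first clears (unmasks) every bit being updated, then sets (masks)
 * exactly those updated bits that are not in @enabled_irq_mask, leaving
 * bits outside @interrupt_mask untouched.
 */
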
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

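/*
 * The three helpers above exist because gen8 moved the PM/RPS interrupt
 * bits out of the dedicated GEN6_PMIIR/PMIMR/PMIER registers and into
 * slot 2 of the banked GT interrupt registers (see the GEN8_GT_PM_IRQ
 * handling in gen8_gt_irq_handler() below). Routing every PM register
 * access through these helpers lets the rest of the RPS code stay
 * generation-agnostic.
 */
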
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
		   dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB hard hang on a looping batchbuffer if
	 * GEN6_PM_UP_EI_EXPIRED is masked; VLV and CHV may too.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
		   ~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev->irq);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

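/*
 * The PIPESTAT register packs status bits in its low half and the
 * corresponding enable bits 16 positions higher, which is why the common
 * case is simply "enable_mask = status_mask << 16": a status bit at
 * position n is normally enabled by setting bit n+16. Only the VLV/CHV
 * sprite flip-done bits break that symmetry and need the special-casing
 * in vlv_get_pipestat_enable_mask() above.
 */
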
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev: drm device
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

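/*
 * Worked example for the "cooked up" vblank counter above: the hardware
 * frame counter only increments at the start of active, while the vblank
 * event fires earlier, at vbl_start (converted to a pixel count above).
 * Once the pixel counter has passed vbl_start we are in the vblank that
 * trails this frame, so adding (pixel >= vbl_start), i.e. +1, reports the
 * value the frame counter will show when the next active period begins.
 * The final "& 0xffffff" keeps the result inside the counter's 24 bits
 * (high bits from PIPEFRAME plus 8 low bits from PIPEFRAMEPIXEL).
 */
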
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix
		 * that, always add htotal-hsync_start to the current pixel
		 * position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}

static void notify_ring(struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
	unsigned int mul = 100;

	if (old->cz_clock == 0)
		return false;

	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->czclk_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

	return c0 >= time;
}

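/*
 * The closing comparison in vlv_c0_above() is a division-free form of
 * "busyness >= threshold percent": with mul being the percentage scale
 * (100, or 100 << 8 when the counters run in high range),
 *
 *	c0 * mul * K >= elapsed_cz * threshold * czclk_freq
 *
 * is the cross-multiplied version of (c0 / elapsed time) * 100 >=
 * threshold, where K (VLV_CZ_CLOCK_TO_MILLI_SEC) reconciles the units of
 * the CZ timestamp and the C0 residency counters.
 */
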
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		if (ring->irq_refcount)
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	/*
	 * The RPS work is synced during runtime suspend, we don't require a
	 * wakeref. TODO: instead of disabling the asserts make sure that we
	 * always hold an RPM reference while the work is running.
	 */
	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);

	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
out:
	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
}

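/*
 * The adj bookkeeping above implements a simple exponential ramp: each
 * consecutive up-threshold (or down-threshold) event doubles the step
 * applied to the frequency, while an event in the other direction, a
 * client boost, or an active waiter resets the step to the +/-1 base
 * (+/-2 on CHV, which only accepts even encodings). The result is always
 * clamped to the sysfs soft limits before being handed to
 * intel_set_rps().
 */
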
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
{
	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
		notify_ring(ring);
	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
		intel_lrc_irq_handler(ring);
}

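/*
 * On gen8 the GT interrupts are banked: two engines share each 32-bit
 * IIR dword, seeing the same event bits (user interrupt, context switch)
 * at an engine-specific shift. gen8_gt_irq_handler() below selects the
 * dword with GEN8_GT_IIR(n) and calls gen8_cs_irq_handler() once per
 * engine with the matching GEN8_*_IRQ_SHIFT: RCS and BCS share
 * GEN8_GT_IIR(0), VCS1 and VCS2 share GEN8_GT_IIR(1), VECS uses
 * GEN8_GT_IIR(3), and the PM/RPS events live in GEN8_GT_IIR(2).
 */
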
static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
		if (iir) {
			I915_WRITE_FW(GEN8_GT_IIR(0), iir);
			ret = IRQ_HANDLED;

			gen8_cs_irq_handler(&dev_priv->ring[RCS],
					    iir, GEN8_RCS_IRQ_SHIFT);

			gen8_cs_irq_handler(&dev_priv->ring[BCS],
					    iir, GEN8_BCS_IRQ_SHIFT);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
		if (iir) {
			I915_WRITE_FW(GEN8_GT_IIR(1), iir);
			ret = IRQ_HANDLED;

			gen8_cs_irq_handler(&dev_priv->ring[VCS],
					    iir, GEN8_VCS1_IRQ_SHIFT);

			gen8_cs_irq_handler(&dev_priv->ring[VCS2],
					    iir, GEN8_VCS2_IRQ_SHIFT);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
		if (iir) {
			I915_WRITE_FW(GEN8_GT_IIR(3), iir);
			ret = IRQ_HANDLED;

			gen8_cs_irq_handler(&dev_priv->ring[VECS],
					    iir, GEN8_VECS_IRQ_SHIFT);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
		if (iir & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      iir & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, iir);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

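/*
 * Usage sketch (illustrative; the actual call sites live in the
 * per-platform hotplug interrupt handlers): a caller zeroes both masks
 * once and may then accumulate results from several trigger/long-detect
 * register pairs before handing them on, e.g. for Broxton:
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
 *			   dig_hotplug_reg, hpd_bxt,
 *			   bxt_port_hotplug_long_detect);
 *	intel_hpd_irq_handler(dev, pin_mask, long_mask);
 *
 * where hotplug_trigger/dig_hotplug_reg are values the interrupt handler
 * has already read from the hardware.
 */
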
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

8d7849db
VS
1639static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1640{
8d7849db
VS
1641 if (!drm_handle_vblank(dev, pipe))
1642 return false;
1643
8d7849db
VS
1644 return true;
1645}
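
/*
 * The pipestat handler below collects and acks all pipe statuses in a
 * single pass under irq_lock, and only acts on them after the lock has
 * been dropped; the handlers called from the second loop (page flips,
 * CRC, underruns, gmbus) never run with irq_lock held.
 */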
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}
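
/*
 * All the hotplug handlers below split the triggered pins into pin_mask
 * (which pins changed) and long_mask (which of those saw a long pulse).
 * A long pulse indicates a plug or unplug, while a short pulse is used
 * by DisplayPort sinks to request attention without a connection change.
 */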
static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
	u32 pin_mask = 0, long_mask = 0;

	if (!hotplug_status)
		return;

	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	/*
	 * Make sure hotplug status is cleared before we clear IIR, or else we
	 * may miss hotplug events.
	 */
	POSTING_READ(PORT_HOTPLUG_STAT);

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
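
/*
 * Cherryview uses the gen8-style master interrupt control for the GT
 * and PM domains, but keeps the VLV-style VLV_IIR/PIPESTAT scheme for
 * display, which is why the handler below mixes both.
 */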
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			   PORTD_HOTPLUG_STATUS_MASK |
			   PORTC_HOTPLUG_STATUS_MASK |
			   PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}
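
/*
 * Both GEN7_ERR_INT and SERR_INT appear to be write-one-to-clear:
 * writing back the value we just read acks exactly the error bits we
 * have handled, without racing against new errors latched in between.
 * (This is inferred from the read-then-write-back pattern above.)
 */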

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);
}
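
/*
 * On SPT the port E hotplug status lives in PCH_PORT_HOTPLUG2 rather
 * than PCH_PORT_HOTPLUG, so the handler above decodes the two trigger
 * sets separately before raising a single combined hotplug event.
 */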

static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clearing the CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clearing the CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
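
/*
 * The gen8 top half below follows the same pattern as ironlake: disable
 * the master interrupt, then for each domain (GT, DE misc, DE port, the
 * per-pipe IIRs and finally the PCH) find, ack and handle the pending
 * bits, and re-enable the master interrupt at the end.
 */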
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;
	enum pipe pipe;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		goto out;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* Find, clear, then process each source of interrupt */

	ret = gen8_gt_irq_handler(dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			if (iir & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			tmp_mask = GEN8_AUX_CHANNEL_A;
			if (INTEL_INFO(dev_priv)->gen >= 9)
				tmp_mask |= GEN9_AUX_CHANNEL_B |
					    GEN9_AUX_CHANNEL_C |
					    GEN9_AUX_CHANNEL_D;

			if (iir & tmp_mask) {
				dp_aux_irq_handler(dev);
				found = true;
			}

			if (IS_BROXTON(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 flip_done, fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		flip_done = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
		else
			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;

		if (flip_done) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		else
			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  fault_errors);
	}

	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (HAS_PCH_SPT(dev_priv))
				spt_irq_handler(dev, iir);
			else
				cpt_irq_handler(dev, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
	}

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
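
/*
 * Note that GEN8_MASTER_IRQ is accessed with the raw _FW mmio helpers
 * above, presumably because the master control register sits outside
 * the forcewake wells; skipping the forcewake dance keeps the interrupt
 * hot path cheap. (That rationale is inferred from the use of the raw
 * accessors, not spelled out anywhere in this file.)
 */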

static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

/**
 * i915_reset_and_wakeup - do process context error handling work
 * @dev: drm device
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_reset_and_wakeup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);

		intel_prepare_reset(dev);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_finish_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_or(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle a gpu error
 * @dev: drm device
 * @wedged: whether the hang should be treated as unrecoverable, triggering
 *          a GPU reset
 * @fmt: printf-style format string describing the error
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
			  &dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset function
		 * i915_reset_and_wakeup doesn't deadlock trying to grab
		 * various locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	i915_reset_and_wakeup(dev);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
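
/*
 * Everything from here down to i915_queue_hangcheck() implements the
 * hangcheck: per-ring helpers that decide whether a ring is idle, stuck
 * on a semaphore wait, or genuinely hung, and the periodic work that
 * scores each ring and escalates to a GPU reset when needed.
 */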
static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring->last_submitted_seqno));
}

static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
	if (INTEL_INFO(dev)->gen >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);

	return NULL;
}

static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	/*
	 * This function does not support execlist mode - any attempt to
	 * proceed further into this function will result in a kernel panic
	 * when dereferencing ring->buffer, which is not set up in execlist
	 * mode.
	 *
	 * The correct way of doing it would be to derive the currently
	 * executing ring buffer from the current context, which is derived
	 * from the currently running request. Unfortunately, to get the
	 * current request we would have to grab the struct_mutex before doing
	 * anything else, which would be ill-advised since some other thread
	 * might have grabbed it already and managed to hang itself, causing
	 * the hang checker to deadlock.
	 *
	 * Therefore, this function does not support execlist mode in its
	 * current form. Just return NULL and move on.
	 */
	if (ring->buffer == NULL)
		return NULL;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}
	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}

static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}
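
/*
 * The per-ring hangcheck.deadlock counter above is bumped on every
 * semaphore_passed() call and cleared via semaphore_clear_deadlocks()
 * before each ring is examined; once it reaches I915_NUM_RINGS the
 * recursion is declared pathological, which bounds the signaller chain
 * we are willing to follow.
 */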

static bool subunits_stuck(struct intel_engine_cs *ring)
{
	u32 instdone[I915_NUM_INSTDONE_REG];
	bool stuck;
	int i;

	if (ring->id != RCS)
		return true;

	i915_get_extra_instdone(ring->dev, instdone);

	/* There might be unstable subunit states even when
	 * actual head is not moving. Filter out the unstable ones by
	 * accumulating the undone -> done transitions and only
	 * consider those as progress.
	 */
	stuck = true;
	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
		const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];

		if (tmp != ring->hangcheck.instdone[i])
			stuck = false;

		ring->hangcheck.instdone[i] |= tmp;
	}

	return stuck;
}

static enum intel_ring_hangcheck_action
head_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	if (acthd != ring->hangcheck.acthd) {
		/* Clear subunit states on head movement */
		memset(ring->hangcheck.instdone, 0,
		       sizeof(ring->hangcheck.instdone));

		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}

	if (!subunits_stuck(ring))
		return HANGCHECK_ACTIVE;

	return HANGCHECK_HUNG;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_ring_hangcheck_action ha;
	u32 tmp;

	ha = head_stuck(ring, acthd);
	if (ha != HANGCHECK_HUNG)
		return ha;

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}

/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of seqno progress per ring,
 * and if there is no progress the hangcheck score for that ring is
 * increased. Further, acthd is inspected to see if the ring is stuck; if
 * so, we kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting it.
 */
static void i915_hangcheck_elapsed(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     gpu_error.hangcheck_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20

	if (!i915.enable_hangcheck)
		return;

	/*
	 * The hangcheck work is synced during runtime suspend, we don't
	 * require a wakeref. TODO: instead of disabling the asserts make
	 * sure that we hold a reference when this work is running.
	 */
	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);

	/* As enabling the GPU requires fairly extensive mmio access,
	 * periodically arm the mmio checker to see if we are triggering
	 * any invalid access.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

	for_each_ring(ring, dev_priv, i) {
		u64 acthd;
		u32 seqno;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is if the
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then, every time we do kick
				 * the ring, we add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and is thus
				 * responsible for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
				case HANGCHECK_ACTIVE:
					break;
				case HANGCHECK_ACTIVE_LOOP:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;

			/* Clear head and subunit states on seqno movement */
			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;

			memset(ring->hangcheck.instdone, 0,
			       sizeof(ring->hangcheck.instdone));
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung) {
		i915_handle_error(dev, true, "Ring hung");
		goto out;
	}

	if (busy_count)
		/* Re-arm the timer in case the chip hangs without another
		 * request being added */
		i915_queue_hangcheck(dev);

out:
	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
}
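
/*
 * The scoring constants above escalate gently: BUSY (1) is charged for
 * ordinary forward-progress ambiguity, KICK (5) each time we have to
 * poke a stuck wait, and HUNG (20) when a ring shows no progress at
 * all. A ring whose score reaches HANGCHECK_SCORE_RING_HUNG triggers
 * the actual GPU reset via i915_handle_error().
 */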

void i915_queue_hangcheck(struct drm_device *dev)
{
	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;

	if (!i915.enable_hangcheck)
		return;

	/* Don't continually defer the hangcheck so that it is always run at
	 * least once after work has been scheduled on any ring. Otherwise,
	 * we will ignore a hung ring if a second ring is kept busy.
	 */
	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
}

static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}

/* drm_dma.h hooks */
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}

static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	GEN5_IRQ_RESET(VLV_);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	vlv_display_irq_reset(dev_priv);
}

static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_reset(dev);
}
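
/*
 * Re-initialize the DE_PIPE interrupt registers for the pipes in @pipe_mask.
 * This is exported (non-static) because those registers lose their contents
 * when the corresponding display power well goes down; the power-domain code
 * is expected to call this after power-up so the registers are reprogrammed
 * from the cached de_irq_mask[] values.
 */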
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;

	spin_lock_irq(&dev_priv->irq_lock);
	if (pipe_mask & 1 << PIPE_A)
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
				  dev_priv->de_irq_mask[PIPE_A],
				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
	if (pipe_mask & 1 << PIPE_B)
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
				  dev_priv->de_irq_mask[PIPE_B],
				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
	if (pipe_mask & 1 << PIPE_C)
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
				  dev_priv->de_irq_mask[PIPE_C],
				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

	vlv_display_irq_reset(dev_priv);
}
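
/*
 * Build the mask of hotplug IRQ bits to enable: walk every encoder and, for
 * each hpd pin currently in the HPD_ENABLED state, OR in that pin's bit from
 * the platform's hpd[] lookup table. Callers pass the table matching their
 * CPU/PCH generation (hpd_ibx, hpd_cpt, hpd_spt, hpd_bdw, ...).
 */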
static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(dev, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the DisplayPort spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void spt_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (INTEL_INFO(dev)->gen >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the DisplayPort spec).
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);

	ibx_hpd_irq_setup(dev);
}

static void bxt_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
		PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}
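
/*
 * The two helpers below do the actual PIPESTAT/VLV_IER/VLV_IMR programming
 * for the VLV/CHV display side. They are only ever called with
 * dev_priv->irq_lock held (see the valleyview_{enable,disable}_display_irqs()
 * wrappers further down) and are written as mirror images of each other:
 * install unmasks in IMR last, while uninstall masks in IMR first.
 */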
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);
}
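
/*
 * The back-to-back VLV_IIR writes above (and again in the uninstall path
 * below) look redundant but appear deliberate in this version of the driver:
 * the IIR can hold a second pending event behind the one currently visible,
 * so clearing it twice flushes both slots.
 */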

static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_install(dev_priv);
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_uninstall(dev_priv);
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	dev_priv->irq_mask = ~0;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

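/*
 * On gen8 the GT interrupt registers are split into four banks, matching the
 * GEN8_IRQ_*_NDX(GT, n) indices used throughout this file:
 *   0: render (RCS) and blitter (BCS)
 *   1: video (VCS1) and video2 (VCS2)
 *   2: PM/RPS
 *   3: video enhancement (VECS)
 * The gt_interrupts[] table below is laid out in exactly that order.
 */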
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

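/*
 * A note on the DE setup below: the "masked" bits are the events we want
 * delivered immediately (unmasked in IMR and enabled in IER), while the
 * extra bits in de_pipe_enables (vblank, FIFO underrun) are enabled in IER
 * but left masked in IMR, to be unmasked on demand by the vblank and
 * underrun code.
 */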
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	enum pipe pipe;

	if (INTEL_INFO(dev_priv)->gen >= 9) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_BROXTON(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
			  GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

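/*
 * Note the ordering in the postinstall hooks above and below: all of the
 * subordinate IMR/IER registers are programmed first, and only then is the
 * top-level master enable (GEN8_MASTER_IRQ / VLV_MASTER_IER) written, with a
 * posting read where needed, so nothing can fire before its source has been
 * configured.
 */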
static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
{
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_display_irq_reset(dev_priv);

	dev_priv->irq_mask = ~0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	vlv_display_irq_uninstall(dev_priv);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	vlv_display_irq_uninstall(dev_priv);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	ret = IRQ_NONE;
	iir = I915_READ16(IIR);
	if (iir == 0)
		goto out;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}
	ret = IRQ_HANDLED;

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	 * to generate a spurious hotplug event about three
	 * seconds later. So just do it once.
	 */
	if (IS_G4X(dev))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
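
/*
 * A sketch of the load-time ordering implied by the kerneldoc above (the
 * call sites live outside this file, so this is an illustration rather than
 * a quote of the actual load code):
 *
 *	intel_irq_init(dev_priv);	// vtables, workers, timers
 *	intel_irq_install(dev_priv);	// request the IRQ, run postinstall hooks
 *	...output probing...
 *	// hotplug handling is then enabled separately, later in the sequence
 */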

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->dev->irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
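
/*
 * How the two runtime-PM helpers above are expected to pair up in the
 * suspend/resume paths elsewhere in the driver (assumed call sites, shown
 * only as an illustration):
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);	// before power-down
 *	...device is powered down, IRQ state is lost...
 *	intel_runtime_pm_enable_interrupts(dev_priv);	// re-runs pre/postinstall
 */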