/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

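/*
 * A note on the ordering used by the reset macros above: mask everything
 * via IMR first, then zero IER so nothing new is delivered, and only then
 * clear IIR -- twice, since a second event may have been latched behind
 * the one being cleared (see the "Be paranoid" comment).
 */
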
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

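/*
 * Usage note for the helpers above: GTIMR is a mask register, so a set
 * bit *blocks* an interrupt. Enabling is therefore spelled
 * ilk_update_gt_irq(dev_priv, mask, mask) (clear the IMR bits) and
 * disabling ilk_update_gt_irq(dev_priv, mask, 0) (set them again).
 */
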
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB,IVB can hard hang (and VLV,CHV may hard hang) on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev->irq);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

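/*
 * Layout note for the PIPESTAT helpers: the interrupt enable bits live in
 * the upper 16 bits of the register and mirror the status bits in the
 * lower 16, hence the default enable_mask of status_mask << 16, with
 * vlv_get_pipestat_enable_mask() handling the VLV/CHV exceptions.
 */
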
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

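/*
 * In other words: the hardware frame counter ticks at the start of
 * vertical active, while DRM expects the vblank counter to tick at
 * vblank start. Comparing the free-running pixel counter against the
 * pixel-converted vblank start supplies the missing +1 for the tail
 * end of the frame.
 */
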
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (IS_HASWELL(dev) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

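/*
 * Note: the ILK/ips "delay" scale appears inverted relative to frequency
 * (ips.max_delay is numerically the smallest value), which is why rising
 * busyness *decrements* new_delay above and then clamps it against
 * ips.max_delay.
 */
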
static void notify_ring(struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;

	if (old->cz_clock == 0)
		return false;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->mem_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;

	return c0 >= time;
}

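/*
 * The comparison in vlv_c0_above() is effectively a cross-multiplication:
 * instead of dividing to compute a busyness percentage, both sides are
 * scaled to a common unit so that "c0 >= time" asks whether combined
 * render+media C0 residency over the evaluation interval reached the
 * given threshold percentage.
 */
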
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		if (ring->irq_refcount)
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

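/*
 * A note on the adj bookkeeping above: consecutive up (or down) threshold
 * events double the frequency step, while a boost, direction change or
 * unknown event resets it to the minimum step, so sustained load ramps
 * the frequency exponentially rather than one bin at a time.
 */
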

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[RCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[BCS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS2]);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VECS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & BXT_PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/* Get a bit mask of pins that have triggered, and which ones may be long. */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	*pin_mask = 0;
	*long_mask = 0;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

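/*
 * For reference: a "long" pulse normally corresponds to a physical
 * connect/disconnect, while DisplayPort sinks use "short" pulses to
 * request sink attention (e.g. link retraining). The per-platform
 * long_pulse_detect() callbacks above only classify the pulse; acting
 * on the distinction happens later in the hotplug work.
 */
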
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

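/*
 * The CRC entries above form a single-producer (this interrupt handler)
 * single-consumer (debugfs reader) ring; INTEL_PIPE_CRC_ENTRIES_NR must
 * stay a power of two for the "& (NR - 1)" wrap-around to work, and
 * CIRC_SPACE() (from linux/circ_buf.h) guards against overwriting
 * unread entries.
 */
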
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
	u32 pin_mask, long_mask;

	if (!hotplug_status)
		return;

	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	/*
	 * Make sure hotplug status is cleared before we clear IIR, or else we
	 * may miss hotplug events.
	 */
	POSTING_READ(PORT_HOTPLUG_STAT);

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   hotplug_trigger, hpd_status_g4x,
				   i9xx_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   hotplug_trigger, hpd_status_i915,
				   i9xx_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
	}
}

ff1f525e 1592static irqreturn_t valleyview_irq_handler(int irq, void *arg)
7e231dbe 1593{
45a83f84 1594 struct drm_device *dev = arg;
2d1013dd 1595 struct drm_i915_private *dev_priv = dev->dev_private;
1596 u32 iir, gt_iir, pm_iir;
1597 irqreturn_t ret = IRQ_NONE;
7e231dbe 1598
1599 if (!intel_irqs_enabled(dev_priv))
1600 return IRQ_NONE;
1601
7e231dbe 1602 while (true) {
1603 /* Find, clear, then process each source of interrupt */
1604
7e231dbe 1605 gt_iir = I915_READ(GTIIR);
1606 if (gt_iir)
1607 I915_WRITE(GTIIR, gt_iir);
1608
7e231dbe 1609 pm_iir = I915_READ(GEN6_PMIIR);
1610 if (pm_iir)
1611 I915_WRITE(GEN6_PMIIR, pm_iir);
1612
1613 iir = I915_READ(VLV_IIR);
1614 if (iir) {
1615 /* Consume port before clearing IIR or we'll miss events */
1616 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1617 i9xx_hpd_irq_handler(dev);
1618 I915_WRITE(VLV_IIR, iir);
1619 }
1620
1621 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1622 goto out;
1623
1624 ret = IRQ_HANDLED;
1625
1626 if (gt_iir)
1627 snb_gt_irq_handler(dev, dev_priv, gt_iir);
60611c13 1628 if (pm_iir)
d0ecd7e2 1629 gen6_rps_irq_handler(dev_priv, pm_iir);
1630 /* Call regardless, as some status bits might not be
1631 * signalled in iir */
1632 valleyview_pipestat_irq_handler(dev, iir);
1633 }
1634
1635out:
1636 return ret;
1637}
1638
1639static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1640{
45a83f84 1641 struct drm_device *dev = arg;
1642 struct drm_i915_private *dev_priv = dev->dev_private;
1643 u32 master_ctl, iir;
1644 irqreturn_t ret = IRQ_NONE;
43f328d7 1645
1646 if (!intel_irqs_enabled(dev_priv))
1647 return IRQ_NONE;
1648
1649 for (;;) {
1650 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1651 iir = I915_READ(VLV_IIR);
43f328d7 1652
1653 if (master_ctl == 0 && iir == 0)
1654 break;
43f328d7 1655
1656 ret = IRQ_HANDLED;
1657
8e5fd599 1658 I915_WRITE(GEN8_MASTER_IRQ, 0);
43f328d7 1659
27b6c122 1660 /* Find, clear, then process each source of interrupt */
43f328d7 1661
1662 if (iir) {
1663 /* Consume port before clearing IIR or we'll miss events */
1664 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1665 i9xx_hpd_irq_handler(dev);
1666 I915_WRITE(VLV_IIR, iir);
1667 }
43f328d7 1668
74cdb337 1669 gen8_gt_irq_handler(dev_priv, master_ctl);
43f328d7 1670
1671 /* Call regardless, as some status bits might not be
1672 * signalled in iir */
1673 valleyview_pipestat_irq_handler(dev, iir);
43f328d7 1674
1675 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1676 POSTING_READ(GEN8_MASTER_IRQ);
8e5fd599 1677 }
3278f67f 1678
1679 return ret;
1680}
1681
23e81d69 1682static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
776ad806 1683{
2d1013dd 1684 struct drm_i915_private *dev_priv = dev->dev_private;
9db4a9c7 1685 int pipe;
b543fb04 1686 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
13cf5504 1687
1688 if (hotplug_trigger) {
1689 u32 dig_hotplug_reg, pin_mask, long_mask;
1690
1691 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1692 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
776ad806 1693
1694 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1695 dig_hotplug_reg, hpd_ibx,
1696 pch_port_hotplug_long_detect);
1697 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1698 }
91d131d2 1699
1700 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1701 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1702 SDE_AUDIO_POWER_SHIFT);
776ad806 1703 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1704 port_name(port));
1705 }
776ad806 1706
1707 if (pch_iir & SDE_AUX_MASK)
1708 dp_aux_irq_handler(dev);
1709
776ad806 1710 if (pch_iir & SDE_GMBUS)
515ac2bb 1711 gmbus_irq_handler(dev);
1712
1713 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1714 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1715
1716 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1717 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1718
1719 if (pch_iir & SDE_POISON)
1720 DRM_ERROR("PCH poison interrupt\n");
1721
9db4a9c7 1722 if (pch_iir & SDE_FDI_MASK)
055e393f 1723 for_each_pipe(dev_priv, pipe)
1724 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1725 pipe_name(pipe),
1726 I915_READ(FDI_RX_IIR(pipe)));
1727
1728 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1729 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1730
1731 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1732 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1733
776ad806 1734 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1f7247c0 1735 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1736
1737 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1f7247c0 1738 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1739}
1740
1741static void ivb_err_int_handler(struct drm_device *dev)
1742{
1743 struct drm_i915_private *dev_priv = dev->dev_private;
1744 u32 err_int = I915_READ(GEN7_ERR_INT);
5a69b89f 1745 enum pipe pipe;
8664281b 1746
1747 if (err_int & ERR_INT_POISON)
1748 DRM_ERROR("Poison interrupt\n");
1749
055e393f 1750 for_each_pipe(dev_priv, pipe) {
1751 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1752 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
8bf1e9f1 1753
1754 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1755 if (IS_IVYBRIDGE(dev))
277de95e 1756 ivb_pipe_crc_irq_handler(dev, pipe);
5a69b89f 1757 else
277de95e 1758 hsw_pipe_crc_irq_handler(dev, pipe);
1759 }
1760 }
8bf1e9f1 1761
1762 I915_WRITE(GEN7_ERR_INT, err_int);
1763}
1764
1765static void cpt_serr_int_handler(struct drm_device *dev)
1766{
1767 struct drm_i915_private *dev_priv = dev->dev_private;
1768 u32 serr_int = I915_READ(SERR_INT);
1769
1770 if (serr_int & SERR_INT_POISON)
1771 DRM_ERROR("PCH poison interrupt\n");
1772
8664281b 1773 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1f7247c0 1774 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1775
1776 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1f7247c0 1777 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1778
1779 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1f7247c0 1780 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1781
1782 I915_WRITE(SERR_INT, serr_int);
1783}
1784
1785static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1786{
2d1013dd 1787 struct drm_i915_private *dev_priv = dev->dev_private;
23e81d69 1788 int pipe;
1789 u32 hotplug_trigger;
1790
1791 if (HAS_PCH_SPT(dev))
1792 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT;
1793 else
1794 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
13cf5504 1795
1796 if (hotplug_trigger) {
1797 u32 dig_hotplug_reg, pin_mask, long_mask;
23e81d69 1798
1799 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1800 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
fd63e2a9 1801
1802 if (HAS_PCH_SPT(dev)) {
1803 intel_get_hpd_pins(&pin_mask, &long_mask,
1804 hotplug_trigger,
1805 dig_hotplug_reg, hpd_spt,
1806 pch_port_hotplug_long_detect);
1807
1808 /* detect PORTE HP event */
1809 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1810 if (pch_port_hotplug_long_detect(PORT_E,
1811 dig_hotplug_reg))
1812 long_mask |= 1 << HPD_PORT_E;
1813 } else
1814 intel_get_hpd_pins(&pin_mask, &long_mask,
1815 hotplug_trigger,
1816 dig_hotplug_reg, hpd_cpt,
1817 pch_port_hotplug_long_detect);
1818
1819 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1820 }
91d131d2 1821
1822 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1823 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1824 SDE_AUDIO_POWER_SHIFT_CPT);
1825 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1826 port_name(port));
1827 }
1828
1829 if (pch_iir & SDE_AUX_MASK_CPT)
ce99c256 1830 dp_aux_irq_handler(dev);
1831
1832 if (pch_iir & SDE_GMBUS_CPT)
515ac2bb 1833 gmbus_irq_handler(dev);
1834
1835 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1836 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1837
1838 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1839 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1840
1841 if (pch_iir & SDE_FDI_MASK_CPT)
055e393f 1842 for_each_pipe(dev_priv, pipe)
1843 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1844 pipe_name(pipe),
1845 I915_READ(FDI_RX_IIR(pipe)));
1846
1847 if (pch_iir & SDE_ERROR_CPT)
1848 cpt_serr_int_handler(dev);
1849}
1850
1851static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1852{
1853 struct drm_i915_private *dev_priv = dev->dev_private;
40da17c2 1854 enum pipe pipe;
1855
1856 if (de_iir & DE_AUX_CHANNEL_A)
1857 dp_aux_irq_handler(dev);
1858
1859 if (de_iir & DE_GSE)
1860 intel_opregion_asle_intr(dev);
1861
1862 if (de_iir & DE_POISON)
1863 DRM_ERROR("Poison interrupt\n");
1864
055e393f 1865 for_each_pipe(dev_priv, pipe) {
1866 if (de_iir & DE_PIPE_VBLANK(pipe) &&
1867 intel_pipe_handle_vblank(dev, pipe))
1868 intel_check_page_flip(dev, pipe);
5b3a856b 1869
40da17c2 1870 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1f7247c0 1871 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
5b3a856b 1872
1873 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1874 i9xx_pipe_crc_irq_handler(dev, pipe);
c008bc6e 1875
1876 /* plane/pipes map 1:1 on ilk+ */
1877 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1878 intel_prepare_page_flip(dev, pipe);
1879 intel_finish_page_flip_plane(dev, pipe);
1880 }
1881 }
1882
1883 /* check event from PCH */
1884 if (de_iir & DE_PCH_EVENT) {
1885 u32 pch_iir = I915_READ(SDEIIR);
1886
1887 if (HAS_PCH_CPT(dev))
1888 cpt_irq_handler(dev, pch_iir);
1889 else
1890 ibx_irq_handler(dev, pch_iir);
1891
1892 /* should clear PCH hotplug event before clear CPU irq */
1893 I915_WRITE(SDEIIR, pch_iir);
1894 }
1895
1896 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1897 ironlake_rps_change_irq_handler(dev);
1898}
1899
1900static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1901{
1902 struct drm_i915_private *dev_priv = dev->dev_private;
07d27e20 1903 enum pipe pipe;
1904
1905 if (de_iir & DE_ERR_INT_IVB)
1906 ivb_err_int_handler(dev);
1907
1908 if (de_iir & DE_AUX_CHANNEL_A_IVB)
1909 dp_aux_irq_handler(dev);
1910
1911 if (de_iir & DE_GSE_IVB)
1912 intel_opregion_asle_intr(dev);
1913
055e393f 1914 for_each_pipe(dev_priv, pipe) {
1915 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
1916 intel_pipe_handle_vblank(dev, pipe))
1917 intel_check_page_flip(dev, pipe);
1918
1919 /* plane/pipes map 1:1 on ilk+ */
1920 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
1921 intel_prepare_page_flip(dev, pipe);
1922 intel_finish_page_flip_plane(dev, pipe);
1923 }
1924 }
1925
1926 /* check event from PCH */
1927 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1928 u32 pch_iir = I915_READ(SDEIIR);
1929
1930 cpt_irq_handler(dev, pch_iir);
1931
1932 /* clear PCH hotplug event before clear CPU irq */
1933 I915_WRITE(SDEIIR, pch_iir);
1934 }
1935}
1936
1937/*
1938 * To handle irqs with the minimum potential races with fresh interrupts, we:
1939 * 1 - Disable Master Interrupt Control.
1940 * 2 - Find the source(s) of the interrupt.
1941 * 3 - Clear the Interrupt Identity bits (IIR).
1942 * 4 - Process the interrupt(s) that had bits set in the IIRs.
1943 * 5 - Re-enable Master Interrupt Control.
1944 */
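/*
 * A minimal sketch of that pattern (illustrative only; process() is a
 * hypothetical stand-in, and the real handler below must additionally
 * fence off the PCH via SDEIER, as explained there):
 *
 *	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
 *	iir = I915_READ(DEIIR);
 *	I915_WRITE(DEIIR, iir);		clear before processing
 *	process(iir);
 *	I915_WRITE(DEIER, de_ier);	re-enable master
 */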
f1af8fc1 1945static irqreturn_t ironlake_irq_handler(int irq, void *arg)
b1f14ad0 1946{
45a83f84 1947 struct drm_device *dev = arg;
2d1013dd 1948 struct drm_i915_private *dev_priv = dev->dev_private;
f1af8fc1 1949 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
0e43406b 1950 irqreturn_t ret = IRQ_NONE;
b1f14ad0 1951
1952 if (!intel_irqs_enabled(dev_priv))
1953 return IRQ_NONE;
1954
1955 /* We get interrupts on unclaimed registers, so check for this before we
1956 * do any I915_{READ,WRITE}. */
907b28c5 1957 intel_uncore_check_errors(dev);
8664281b 1958
1959 /* disable master interrupt before clearing iir */
1960 de_ier = I915_READ(DEIER);
1961 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
23a78516 1962 POSTING_READ(DEIER);
b1f14ad0 1963
1964 /* Disable south interrupts. We'll only write to SDEIIR once, so further
 1965 * interrupts will be stored on its back queue, and then we'll be
1966 * able to process them after we restore SDEIER (as soon as we restore
1967 * it, we'll get an interrupt if SDEIIR still has something to process
1968 * due to its back queue). */
1969 if (!HAS_PCH_NOP(dev)) {
1970 sde_ier = I915_READ(SDEIER);
1971 I915_WRITE(SDEIER, 0);
1972 POSTING_READ(SDEIER);
1973 }
44498aea 1974
1975 /* Find, clear, then process each source of interrupt */
1976
b1f14ad0 1977 gt_iir = I915_READ(GTIIR);
0e43406b 1978 if (gt_iir) {
1979 I915_WRITE(GTIIR, gt_iir);
1980 ret = IRQ_HANDLED;
d8fc8a47 1981 if (INTEL_INFO(dev)->gen >= 6)
f1af8fc1 1982 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1983 else
1984 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1985 }
1986
1987 de_iir = I915_READ(DEIIR);
1988 if (de_iir) {
1989 I915_WRITE(DEIIR, de_iir);
1990 ret = IRQ_HANDLED;
1991 if (INTEL_INFO(dev)->gen >= 7)
1992 ivb_display_irq_handler(dev, de_iir);
1993 else
1994 ilk_display_irq_handler(dev, de_iir);
1995 }
1996
1997 if (INTEL_INFO(dev)->gen >= 6) {
1998 u32 pm_iir = I915_READ(GEN6_PMIIR);
1999 if (pm_iir) {
2000 I915_WRITE(GEN6_PMIIR, pm_iir);
2001 ret = IRQ_HANDLED;
72c90f62 2002 gen6_rps_irq_handler(dev_priv, pm_iir);
f1af8fc1 2003 }
0e43406b 2004 }
b1f14ad0 2005
2006 I915_WRITE(DEIER, de_ier);
2007 POSTING_READ(DEIER);
2008 if (!HAS_PCH_NOP(dev)) {
2009 I915_WRITE(SDEIER, sde_ier);
2010 POSTING_READ(SDEIER);
2011 }
2012
2013 return ret;
2014}
2015
2016static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
2017{
2018 struct drm_i915_private *dev_priv = dev->dev_private;
2019 u32 hp_control, hp_trigger;
2020 u32 pin_mask, long_mask;
2021
2022 /* Get the status */
2023 hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
2024 hp_control = I915_READ(BXT_HOTPLUG_CTL);
2025
 2026 /* Hotplug not enabled? */
2027 if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
2028 DRM_ERROR("Interrupt when HPD disabled\n");
2029 return;
2030 }
2031
2032 /* Clear sticky bits in hpd status */
2033 I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
d04a492d 2034
fd63e2a9 2035 intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
63c88d22 2036 hpd_bxt, bxt_port_hotplug_long_detect);
676574df 2037 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2038}
2039
2040static irqreturn_t gen8_irq_handler(int irq, void *arg)
2041{
2042 struct drm_device *dev = arg;
2043 struct drm_i915_private *dev_priv = dev->dev_private;
2044 u32 master_ctl;
2045 irqreturn_t ret = IRQ_NONE;
2046 uint32_t tmp = 0;
c42664cc 2047 enum pipe pipe;
2048 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2049
2050 if (!intel_irqs_enabled(dev_priv))
2051 return IRQ_NONE;
2052
2053 if (IS_GEN9(dev))
2054 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2055 GEN9_AUX_CHANNEL_D;
abd58f01 2056
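 /*
  * Note (not from the original): the _FW accessors are the raw mmio
  * variants that skip the uncore forcewake bookkeeping, presumably to
  * keep this hot irq path cheap; the plain I915_READ/I915_WRITE forms
  * are used wherever the extra checking is affordable.
  */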
cb0d205e 2057 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2058 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2059 if (!master_ctl)
2060 return IRQ_NONE;
2061
cb0d205e 2062 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
abd58f01 2063
2064 /* Find, clear, then process each source of interrupt */
2065
74cdb337 2066 ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2067
2068 if (master_ctl & GEN8_DE_MISC_IRQ) {
2069 tmp = I915_READ(GEN8_DE_MISC_IIR);
2070 if (tmp) {
2071 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2072 ret = IRQ_HANDLED;
2073 if (tmp & GEN8_DE_MISC_GSE)
2074 intel_opregion_asle_intr(dev);
2075 else
2076 DRM_ERROR("Unexpected DE Misc interrupt\n");
abd58f01 2077 }
2078 else
2079 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2080 }
2081
2082 if (master_ctl & GEN8_DE_PORT_IRQ) {
2083 tmp = I915_READ(GEN8_DE_PORT_IIR);
6d766f02 2084 if (tmp) {
2085 bool found = false;
2086
2087 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2088 ret = IRQ_HANDLED;
88e04703 2089
d04a492d 2090 if (tmp & aux_mask) {
38cc46d7 2091 dp_aux_irq_handler(dev);
2092 found = true;
2093 }
2094
2095 if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) {
2096 bxt_hpd_handler(dev, tmp);
2097 found = true;
2098 }
2099
2100 if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2101 gmbus_irq_handler(dev);
2102 found = true;
2103 }
2104
d04a492d 2105 if (!found)
38cc46d7 2106 DRM_ERROR("Unexpected DE Port interrupt\n");
6d766f02 2107 }
2108 else
2109 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2110 }
2111
055e393f 2112 for_each_pipe(dev_priv, pipe) {
770de83d 2113 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
abd58f01 2114
2115 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2116 continue;
abd58f01 2117
c42664cc 2118 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2119 if (pipe_iir) {
2120 ret = IRQ_HANDLED;
2121 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
770de83d 2122
2123 if (pipe_iir & GEN8_PIPE_VBLANK &&
2124 intel_pipe_handle_vblank(dev, pipe))
2125 intel_check_page_flip(dev, pipe);
38cc46d7 2126
2127 if (IS_GEN9(dev))
2128 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2129 else
2130 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2131
2132 if (flip_done) {
2133 intel_prepare_page_flip(dev, pipe);
2134 intel_finish_page_flip_plane(dev, pipe);
2135 }
2136
2137 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2138 hsw_pipe_crc_irq_handler(dev, pipe);
2139
2140 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2141 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2142 pipe);
38cc46d7 2143
2144
2145 if (IS_GEN9(dev))
2146 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2147 else
2148 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2149
2150 if (fault_errors)
 2151 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2152 pipe_name(pipe),
2153 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
c42664cc 2154 } else
2155 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2156 }
2157
2158 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2159 master_ctl & GEN8_DE_PCH_IRQ) {
2160 /*
2161 * FIXME(BDW): Assume for now that the new interrupt handling
2162 * scheme also closed the SDE interrupt handling race we've seen
2163 * on older pch-split platforms. But this needs testing.
2164 */
2165 u32 pch_iir = I915_READ(SDEIIR);
2166 if (pch_iir) {
2167 I915_WRITE(SDEIIR, pch_iir);
2168 ret = IRQ_HANDLED;
2169 cpt_irq_handler(dev, pch_iir);
2170 } else
2171 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2172
2173 }
2174
2175 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2176 POSTING_READ_FW(GEN8_MASTER_IRQ);
2177
2178 return ret;
2179}
2180
2181static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2182 bool reset_completed)
2183{
a4872ba6 2184 struct intel_engine_cs *ring;
2185 int i;
2186
2187 /*
2188 * Notify all waiters for GPU completion events that reset state has
2189 * been changed, and that they need to restart their wait after
2190 * checking for potential errors (and bail out to drop locks if there is
2191 * a gpu reset pending so that i915_error_work_func can acquire them).
2192 */
2193
2194 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2195 for_each_ring(ring, dev_priv, i)
2196 wake_up_all(&ring->irq_queue);
2197
2198 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2199 wake_up_all(&dev_priv->pending_flip_queue);
2200
2201 /*
2202 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2203 * reset state is cleared.
2204 */
2205 if (reset_completed)
2206 wake_up_all(&dev_priv->gpu_error.reset_queue);
2207}
2208
8a905236 2209/**
b8d24a06 2210 * i915_reset_and_wakeup - do process context error handling work
2211 *
2212 * Fire an error uevent so userspace can see that a hang or error
2213 * was detected.
2214 */
b8d24a06 2215static void i915_reset_and_wakeup(struct drm_device *dev)
8a905236 2216{
2217 struct drm_i915_private *dev_priv = to_i915(dev);
2218 struct i915_gpu_error *error = &dev_priv->gpu_error;
2219 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2220 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2221 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
17e1df07 2222 int ret;
8a905236 2223
5bdebb18 2224 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
f316a42c 2225
2226 /*
2227 * Note that there's only one work item which does gpu resets, so we
2228 * need not worry about concurrent gpu resets potentially incrementing
2229 * error->reset_counter twice. We only need to take care of another
2230 * racing irq/hangcheck declaring the gpu dead for a second time. A
2231 * quick check for that is good enough: schedule_work ensures the
2232 * correct ordering between hang detection and this work item, and since
2233 * the reset in-progress bit is only ever set by code outside of this
2234 * work we don't need to worry about any other races.
2235 */
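 /*
  * Background note (added, based on the flags used in this file):
  * reset_counter doubles as a state word. I915_RESET_IN_PROGRESS_FLAG
  * (bit 0) is set via atomic_or() in i915_handle_error() when a hang is
  * declared, the atomic_inc() on completion makes the value even again,
  * and I915_WEDGED marks a terminally broken GPU.
  */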
2236 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
f803aa55 2237 DRM_DEBUG_DRIVER("resetting chip\n");
5bdebb18 2238 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
7db0ba24 2239 reset_event);
1f83fee0 2240
2241 /*
2242 * In most cases it's guaranteed that we get here with an RPM
2243 * reference held, for example because there is a pending GPU
2244 * request that won't finish until the reset is done. This
2245 * isn't the case at least when we get here by doing a
 2246 * simulated reset via debugfs, so get an RPM reference.
2247 */
2248 intel_runtime_pm_get(dev_priv);
2249
2250 intel_prepare_reset(dev);
2251
2252 /*
2253 * All state reset _must_ be completed before we update the
2254 * reset counter, for otherwise waiters might miss the reset
2255 * pending state and not properly drop locks, resulting in
2256 * deadlocks with the reset work.
2257 */
2258 ret = i915_reset(dev);
2259
7514747d 2260 intel_finish_reset(dev);
17e1df07 2261
2262 intel_runtime_pm_put(dev_priv);
2263
2264 if (ret == 0) {
2265 /*
2266 * After all the gem state is reset, increment the reset
2267 * counter and wake up everyone waiting for the reset to
2268 * complete.
2269 *
2270 * Since unlock operations are a one-sided barrier only,
2271 * we need to insert a barrier here to order any seqno
2272 * updates before
2273 * the counter increment.
2274 */
4e857c58 2275 smp_mb__before_atomic();
2276 atomic_inc(&dev_priv->gpu_error.reset_counter);
2277
5bdebb18 2278 kobject_uevent_env(&dev->primary->kdev->kobj,
f69061be 2279 KOBJ_CHANGE, reset_done_event);
1f83fee0 2280 } else {
805de8f4 2281 atomic_or(I915_WEDGED, &error->reset_counter);
f316a42c 2282 }
1f83fee0 2283
2284 /*
2285 * Note: The wake_up also serves as a memory barrier so that
 2286 * waiters see the updated value of the reset counter atomic_t.
2287 */
2288 i915_error_wake_up(dev_priv, true);
f316a42c 2289 }
2290}
2291
35aed2e6 2292static void i915_report_and_clear_eir(struct drm_device *dev)
2293{
2294 struct drm_i915_private *dev_priv = dev->dev_private;
bd9854f9 2295 uint32_t instdone[I915_NUM_INSTDONE_REG];
8a905236 2296 u32 eir = I915_READ(EIR);
050ee91f 2297 int pipe, i;
8a905236 2298
2299 if (!eir)
2300 return;
8a905236 2301
a70491cc 2302 pr_err("render error detected, EIR: 0x%08x\n", eir);
8a905236 2303
2304 i915_get_extra_instdone(dev, instdone);
2305
2306 if (IS_G4X(dev)) {
2307 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2308 u32 ipeir = I915_READ(IPEIR_I965);
2309
2310 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2311 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2312 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2313 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
a70491cc 2314 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
a70491cc 2315 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
8a905236 2316 I915_WRITE(IPEIR_I965, ipeir);
3143a2bf 2317 POSTING_READ(IPEIR_I965);
2318 }
2319 if (eir & GM45_ERROR_PAGE_TABLE) {
2320 u32 pgtbl_err = I915_READ(PGTBL_ER);
2321 pr_err("page table error\n");
2322 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
8a905236 2323 I915_WRITE(PGTBL_ER, pgtbl_err);
3143a2bf 2324 POSTING_READ(PGTBL_ER);
2325 }
2326 }
2327
a6c45cf0 2328 if (!IS_GEN2(dev)) {
2329 if (eir & I915_ERROR_PAGE_TABLE) {
2330 u32 pgtbl_err = I915_READ(PGTBL_ER);
2331 pr_err("page table error\n");
2332 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
8a905236 2333 I915_WRITE(PGTBL_ER, pgtbl_err);
3143a2bf 2334 POSTING_READ(PGTBL_ER);
2335 }
2336 }
2337
2338 if (eir & I915_ERROR_MEMORY_REFRESH) {
a70491cc 2339 pr_err("memory refresh error:\n");
055e393f 2340 for_each_pipe(dev_priv, pipe)
a70491cc 2341 pr_err("pipe %c stat: 0x%08x\n",
9db4a9c7 2342 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2343 /* pipestat has already been acked */
2344 }
2345 if (eir & I915_ERROR_INSTRUCTION) {
2346 pr_err("instruction error\n");
2347 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2348 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2349 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
a6c45cf0 2350 if (INTEL_INFO(dev)->gen < 4) {
2351 u32 ipeir = I915_READ(IPEIR);
2352
2353 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2354 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
a70491cc 2355 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
8a905236 2356 I915_WRITE(IPEIR, ipeir);
3143a2bf 2357 POSTING_READ(IPEIR);
2358 } else {
2359 u32 ipeir = I915_READ(IPEIR_I965);
2360
2361 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2362 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
a70491cc 2363 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
a70491cc 2364 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
8a905236 2365 I915_WRITE(IPEIR_I965, ipeir);
3143a2bf 2366 POSTING_READ(IPEIR_I965);
2367 }
2368 }
2369
2370 I915_WRITE(EIR, eir);
3143a2bf 2371 POSTING_READ(EIR);
2372 eir = I915_READ(EIR);
2373 if (eir) {
2374 /*
2375 * some errors might have become stuck,
2376 * mask them.
2377 */
2378 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2379 I915_WRITE(EMR, I915_READ(EMR) | eir);
2380 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2381 }
2382}
2383
2384/**
b8d24a06 2385 * i915_handle_error - handle a gpu error
2386 * @dev: drm device
2387 *
b8d24a06 2388 * Do some basic checking of register state at error time and
35aed2e6
CW
2389 * dump it to the syslog. Also call i915_capture_error_state() to make
2390 * sure we get a record and make it available in debugfs. Fire a uevent
2391 * so userspace knows something bad happened (should trigger collection
2392 * of a ring dump etc.).
2393 */
2394void i915_handle_error(struct drm_device *dev, bool wedged,
2395 const char *fmt, ...)
2396{
2397 struct drm_i915_private *dev_priv = dev->dev_private;
2398 va_list args;
2399 char error_msg[80];
35aed2e6 2400
2401 va_start(args, fmt);
2402 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2403 va_end(args);
2404
2405 i915_capture_error_state(dev, wedged, error_msg);
35aed2e6 2406 i915_report_and_clear_eir(dev);
8a905236 2407
ba1234d1 2408 if (wedged) {
805de8f4 2409 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
f69061be 2410 &dev_priv->gpu_error.reset_counter);
ba1234d1 2411
11ed50ec 2412 /*
2413 * Wakeup waiting processes so that the reset function
2414 * i915_reset_and_wakeup doesn't deadlock trying to grab
2415 * various locks. By bumping the reset counter first, the woken
2416 * processes will see a reset in progress and back off,
2417 * releasing their locks and then wait for the reset completion.
2418 * We must do this for _all_ gpu waiters that might hold locks
2419 * that the reset work needs to acquire.
2420 *
2421 * Note: The wake_up serves as the required memory barrier to
2422 * ensure that the waiters see the updated value of the reset
2423 * counter atomic_t.
11ed50ec 2424 */
17e1df07 2425 i915_error_wake_up(dev_priv, false);
2426 }
2427
b8d24a06 2428 i915_reset_and_wakeup(dev);
2429}
2430
2431/* Called from drm generic code, passed 'crtc' which
2432 * we use as a pipe index
2433 */
f71d4af4 2434static int i915_enable_vblank(struct drm_device *dev, int pipe)
0a3e67a4 2435{
2d1013dd 2436 struct drm_i915_private *dev_priv = dev->dev_private;
e9d21d7f 2437 unsigned long irqflags;
71e0ffa5 2438
1ec14ad3 2439 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f796cf8f 2440 if (INTEL_INFO(dev)->gen >= 4)
7c463586 2441 i915_enable_pipestat(dev_priv, pipe,
755e9019 2442 PIPE_START_VBLANK_INTERRUPT_STATUS);
e9d21d7f 2443 else
7c463586 2444 i915_enable_pipestat(dev_priv, pipe,
755e9019 2445 PIPE_VBLANK_INTERRUPT_STATUS);
1ec14ad3 2446 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
8692d00e 2447
2448 return 0;
2449}
2450
f71d4af4 2451static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
f796cf8f 2452{
2d1013dd 2453 struct drm_i915_private *dev_priv = dev->dev_private;
f796cf8f 2454 unsigned long irqflags;
b518421f 2455 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
40da17c2 2456 DE_PIPE_VBLANK(pipe);
f796cf8f 2457
f796cf8f 2458 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
b518421f 2459 ironlake_enable_display_irq(dev_priv, bit);
2460 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2461
2462 return 0;
2463}
2464
2465static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2466{
2d1013dd 2467 struct drm_i915_private *dev_priv = dev->dev_private;
7e231dbe 2468 unsigned long irqflags;
7e231dbe 2469
7e231dbe 2470 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
31acc7f5 2471 i915_enable_pipestat(dev_priv, pipe,
755e9019 2472 PIPE_START_VBLANK_INTERRUPT_STATUS);
2473 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2474
2475 return 0;
2476}
2477
2478static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2479{
2480 struct drm_i915_private *dev_priv = dev->dev_private;
2481 unsigned long irqflags;
abd58f01 2482
abd58f01 2483 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2484 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2485 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2486 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2487 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2488 return 0;
2489}
2490
2491/* Called from drm generic code, passed 'crtc' which
2492 * we use as a pipe index
2493 */
f71d4af4 2494static void i915_disable_vblank(struct drm_device *dev, int pipe)
0a3e67a4 2495{
2d1013dd 2496 struct drm_i915_private *dev_priv = dev->dev_private;
e9d21d7f 2497 unsigned long irqflags;
0a3e67a4 2498
1ec14ad3 2499 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f796cf8f 2500 i915_disable_pipestat(dev_priv, pipe,
2501 PIPE_VBLANK_INTERRUPT_STATUS |
2502 PIPE_START_VBLANK_INTERRUPT_STATUS);
2503 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2504}
2505
f71d4af4 2506static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
f796cf8f 2507{
2d1013dd 2508 struct drm_i915_private *dev_priv = dev->dev_private;
f796cf8f 2509 unsigned long irqflags;
b518421f 2510 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
40da17c2 2511 DE_PIPE_VBLANK(pipe);
2512
2513 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
b518421f 2514 ironlake_disable_display_irq(dev_priv, bit);
2515 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2516}
2517
2518static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2519{
2d1013dd 2520 struct drm_i915_private *dev_priv = dev->dev_private;
7e231dbe 2521 unsigned long irqflags;
2522
2523 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
31acc7f5 2524 i915_disable_pipestat(dev_priv, pipe,
755e9019 2525 PIPE_START_VBLANK_INTERRUPT_STATUS);
2526 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2527}
2528
2529static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2530{
2531 struct drm_i915_private *dev_priv = dev->dev_private;
2532 unsigned long irqflags;
abd58f01 2533
abd58f01 2534 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2535 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2536 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2537 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2538 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2539}
2540
9107e9d2 2541static bool
94f7bbe1 2542ring_idle(struct intel_engine_cs *ring, u32 seqno)
2543{
2544 return (list_empty(&ring->request_list) ||
94f7bbe1 2545 i915_seqno_passed(seqno, ring->last_submitted_seqno));
2546}
2547
2548static bool
2549ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2550{
2551 if (INTEL_INFO(dev)->gen >= 8) {
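 /* Note (added): bits 28:23 of IPEHR carry the MI opcode, and 0x1c is
  * MI_SEMAPHORE_WAIT on gen8+, so only the opcode field is compared. */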
a6cdb93a 2552 return (ipehr >> 23) == 0x1c;
2553 } else {
2554 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2555 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2556 MI_SEMAPHORE_REGISTER);
2557 }
2558}
2559
a4872ba6 2560static struct intel_engine_cs *
a6cdb93a 2561semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2562{
2563 struct drm_i915_private *dev_priv = ring->dev->dev_private;
a4872ba6 2564 struct intel_engine_cs *signaller;
2565 int i;
2566
2567 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2568 for_each_ring(signaller, dev_priv, i) {
2569 if (ring == signaller)
2570 continue;
2571
2572 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2573 return signaller;
2574 }
2575 } else {
2576 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2577
2578 for_each_ring(signaller, dev_priv, i) {
 2579 if (ring == signaller)
2580 continue;
2581
ebc348b2 2582 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2583 return signaller;
2584 }
2585 }
2586
2587 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2588 ring->id, ipehr, offset);
2589
2590 return NULL;
2591}
2592
2593static struct intel_engine_cs *
2594semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2595{
2596 struct drm_i915_private *dev_priv = ring->dev->dev_private;
88fe429d 2597 u32 cmd, ipehr, head;
2598 u64 offset = 0;
2599 int i, backwards;
2600
2601 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
a028c4b0 2602 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
6274f212 2603 return NULL;
a24a11e6 2604
2605 /*
2606 * HEAD is likely pointing to the dword after the actual command,
2607 * so scan backwards until we find the MBOX. But limit it to just 3
2608 * or 4 dwords depending on the semaphore wait command size.
2609 * Note that we don't care about ACTHD here since that might
 2610 * point at a batch, and semaphores are always emitted into the
2611 * ringbuffer itself.
a24a11e6 2612 */
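 /*
  * Layout note (added, matching the ioread32()s at the end of this
  * function): the semaphore command dword sits at 'head', the awaited
  * seqno at head + 4, and on gen8+ the 64-bit signaller offset is split
  * across head + 8 (low dword) and head + 12 (high dword).
  */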
88fe429d 2613 head = I915_READ_HEAD(ring) & HEAD_ADDR;
a6cdb93a 2614 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
88fe429d 2615
a6cdb93a 2616 for (i = backwards; i; --i) {
2617 /*
2618 * Be paranoid and presume the hw has gone off into the wild -
2619 * our ring is smaller than what the hardware (and hence
2620 * HEAD_ADDR) allows. Also handles wrap-around.
2621 */
ee1b1e5e 2622 head &= ring->buffer->size - 1;
2623
2624 /* This here seems to blow up */
ee1b1e5e 2625 cmd = ioread32(ring->buffer->virtual_start + head);
2626 if (cmd == ipehr)
2627 break;
2628
2629 head -= 4;
2630 }
a24a11e6 2631
2632 if (!i)
2633 return NULL;
a24a11e6 2634
ee1b1e5e 2635 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2636 if (INTEL_INFO(ring->dev)->gen >= 8) {
2637 offset = ioread32(ring->buffer->virtual_start + head + 12);
2638 offset <<= 32;
 2639 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2640 }
2641 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2642}
2643
a4872ba6 2644static int semaphore_passed(struct intel_engine_cs *ring)
2645{
2646 struct drm_i915_private *dev_priv = ring->dev->dev_private;
a4872ba6 2647 struct intel_engine_cs *signaller;
a0d036b0 2648 u32 seqno;
6274f212 2649
4be17381 2650 ring->hangcheck.deadlock++;
2651
2652 signaller = semaphore_waits_for(ring, &seqno);
2653 if (signaller == NULL)
2654 return -1;
2655
2656 /* Prevent pathological recursion due to driver bugs */
2657 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2658 return -1;
2659
2660 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2661 return 1;
2662
2663 /* cursory check for an unkickable deadlock */
2664 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2665 semaphore_passed(signaller) < 0)
2666 return -1;
2667
2668 return 0;
2669}
2670
2671static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2672{
a4872ba6 2673 struct intel_engine_cs *ring;
2674 int i;
2675
2676 for_each_ring(ring, dev_priv, i)
4be17381 2677 ring->hangcheck.deadlock = 0;
2678}
2679
ad8beaea 2680static enum intel_ring_hangcheck_action
a4872ba6 2681ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2682{
2683 struct drm_device *dev = ring->dev;
2684 struct drm_i915_private *dev_priv = dev->dev_private;
2685 u32 tmp;
2686
2687 if (acthd != ring->hangcheck.acthd) {
2688 if (acthd > ring->hangcheck.max_acthd) {
2689 ring->hangcheck.max_acthd = acthd;
2690 return HANGCHECK_ACTIVE;
2691 }
2692
2693 return HANGCHECK_ACTIVE_LOOP;
2694 }
6274f212 2695
9107e9d2 2696 if (IS_GEN2(dev))
f2f4d82f 2697 return HANGCHECK_HUNG;
2698
2699 /* Is the chip hanging on a WAIT_FOR_EVENT?
2700 * If so we can simply poke the RB_WAIT bit
2701 * and break the hang. This should work on
2702 * all but the second generation chipsets.
2703 */
2704 tmp = I915_READ_CTL(ring);
1ec14ad3 2705 if (tmp & RING_WAIT) {
2706 i915_handle_error(dev, false,
2707 "Kicking stuck wait on %s",
2708 ring->name);
1ec14ad3 2709 I915_WRITE_CTL(ring, tmp);
f2f4d82f 2710 return HANGCHECK_KICK;
2711 }
2712
2713 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2714 switch (semaphore_passed(ring)) {
2715 default:
f2f4d82f 2716 return HANGCHECK_HUNG;
6274f212 2717 case 1:
2718 i915_handle_error(dev, false,
2719 "Kicking stuck semaphore on %s",
2720 ring->name);
6274f212 2721 I915_WRITE_CTL(ring, tmp);
f2f4d82f 2722 return HANGCHECK_KICK;
6274f212 2723 case 0:
f2f4d82f 2724 return HANGCHECK_WAIT;
6274f212 2725 }
9107e9d2 2726 }
ed5cbb03 2727
f2f4d82f 2728 return HANGCHECK_HUNG;
2729}
2730
737b1506 2731/*
f65d9421 2732 * This is called when the chip hasn't reported back with completed
 2733 * batchbuffers in a long time. We keep track of seqno progress per ring;
 2734 * if there is no progress, the hangcheck score for that ring is increased.
 2735 * Further, acthd is inspected to see if the ring is stuck. If it is stuck,
 2736 * we kick the ring. If we see no progress on three subsequent calls
 2737 * we assume the chip is wedged and try to fix it by resetting the chip.
f65d9421 2738 */
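/*
 * Scoring note (added, based on the #defines and comparison below): a ring
 * earns BUSY (1) while work is outstanding, KICK (5) each time a stuck wait
 * had to be kicked, and HUNG (20) when it looks truly wedged; it is reported
 * hung once its score reaches HANGCHECK_SCORE_RING_HUNG.
 */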
737b1506 2739static void i915_hangcheck_elapsed(struct work_struct *work)
f65d9421 2740{
2741 struct drm_i915_private *dev_priv =
2742 container_of(work, typeof(*dev_priv),
2743 gpu_error.hangcheck_work.work);
2744 struct drm_device *dev = dev_priv->dev;
a4872ba6 2745 struct intel_engine_cs *ring;
b4519513 2746 int i;
05407ff8 2747 int busy_count = 0, rings_hung = 0;
2748 bool stuck[I915_NUM_RINGS] = { 0 };
2749#define BUSY 1
2750#define KICK 5
2751#define HUNG 20
893eead0 2752
d330a953 2753 if (!i915.enable_hangcheck)
2754 return;
2755
b4519513 2756 for_each_ring(ring, dev_priv, i) {
2757 u64 acthd;
2758 u32 seqno;
9107e9d2 2759 bool busy = true;
05407ff8 2760
2761 semaphore_clear_deadlocks(dev_priv);
2762
2763 seqno = ring->get_seqno(ring, false);
2764 acthd = intel_ring_get_active_head(ring);
b4519513 2765
9107e9d2 2766 if (ring->hangcheck.seqno == seqno) {
94f7bbe1 2767 if (ring_idle(ring, seqno)) {
2768 ring->hangcheck.action = HANGCHECK_IDLE;
2769
2770 if (waitqueue_active(&ring->irq_queue)) {
2771 /* Issue a wake-up to catch stuck h/w. */
094f9a54 2772 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2773 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2774 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2775 ring->name);
2776 else
2777 DRM_INFO("Fake missed irq on %s\n",
2778 ring->name);
2779 wake_up_all(&ring->irq_queue);
2780 }
2781 /* Safeguard against driver failure */
2782 ring->hangcheck.score += BUSY;
2783 } else
2784 busy = false;
05407ff8 2785 } else {
2786 /* We always increment the hangcheck score
2787 * if the ring is busy and still processing
2788 * the same request, so that no single request
2789 * can run indefinitely (such as a chain of
2790 * batches). The only time we do not increment
 2791 * the hangcheck score on this ring is if this
2792 * ring is in a legitimate wait for another
2793 * ring. In that case the waiting ring is a
2794 * victim and we want to be sure we catch the
2795 * right culprit. Then every time we do kick
2796 * the ring, add a small increment to the
2797 * score so that we can catch a batch that is
2798 * being repeatedly kicked and so responsible
2799 * for stalling the machine.
2800 */
2801 ring->hangcheck.action = ring_stuck(ring,
2802 acthd);
2803
2804 switch (ring->hangcheck.action) {
da661464 2805 case HANGCHECK_IDLE:
f2f4d82f 2806 case HANGCHECK_WAIT:
f2f4d82f 2807 case HANGCHECK_ACTIVE:
2808 break;
2809 case HANGCHECK_ACTIVE_LOOP:
ea04cb31 2810 ring->hangcheck.score += BUSY;
6274f212 2811 break;
f2f4d82f 2812 case HANGCHECK_KICK:
ea04cb31 2813 ring->hangcheck.score += KICK;
6274f212 2814 break;
f2f4d82f 2815 case HANGCHECK_HUNG:
ea04cb31 2816 ring->hangcheck.score += HUNG;
2817 stuck[i] = true;
2818 break;
2819 }
05407ff8 2820 }
9107e9d2 2821 } else {
2822 ring->hangcheck.action = HANGCHECK_ACTIVE;
2823
2824 /* Gradually reduce the count so that we catch DoS
2825 * attempts across multiple batches.
2826 */
2827 if (ring->hangcheck.score > 0)
2828 ring->hangcheck.score--;
2829
2830 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
2831 }
2832
2833 ring->hangcheck.seqno = seqno;
2834 ring->hangcheck.acthd = acthd;
9107e9d2 2835 busy_count += busy;
893eead0 2836 }
b9201c14 2837
92cab734 2838 for_each_ring(ring, dev_priv, i) {
b6b0fac0 2839 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
2840 DRM_INFO("%s on %s\n",
2841 stuck[i] ? "stuck" : "no progress",
2842 ring->name);
a43adf07 2843 rings_hung++;
2844 }
2845 }
2846
05407ff8 2847 if (rings_hung)
58174462 2848 return i915_handle_error(dev, true, "Ring hung");
f65d9421 2849
2850 if (busy_count)
 2851 /* Reset timer in case the chip hangs without another request
2852 * being added */
2853 i915_queue_hangcheck(dev);
2854}
2855
2856void i915_queue_hangcheck(struct drm_device *dev)
2857{
737b1506 2858 struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
672e7b7c 2859
d330a953 2860 if (!i915.enable_hangcheck)
2861 return;
2862
2863 /* Don't continually defer the hangcheck so that it is always run at
2864 * least once after work has been scheduled on any ring. Otherwise,
2865 * we will ignore a hung ring if a second ring is kept busy.
2866 */
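 /*
  * Timing note (added; hedged): DRM_I915_HANGCHECK_JIFFIES is derived
  * from the hangcheck period, roughly 1.5 seconds in this era, so several
  * consecutive periods without progress elapse before a ring is declared
  * hung.
  */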
2867
2868 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
2869 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
2870}
2871
1c69eb42 2872static void ibx_irq_reset(struct drm_device *dev)
2873{
2874 struct drm_i915_private *dev_priv = dev->dev_private;
2875
2876 if (HAS_PCH_NOP(dev))
2877 return;
2878
f86f3fb0 2879 GEN5_IRQ_RESET(SDE);
2880
2881 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2882 I915_WRITE(SERR_INT, 0xffffffff);
622364b6 2883}
105b122e 2884
2885/*
2886 * SDEIER is also touched by the interrupt handler to work around missed PCH
2887 * interrupts. Hence we can't update it after the interrupt handler is enabled -
2888 * instead we unconditionally enable all PCH interrupt sources here, but then
2889 * only unmask them as needed with SDEIMR.
2890 *
2891 * This function needs to be called before interrupts are enabled.
2892 */
2893static void ibx_irq_pre_postinstall(struct drm_device *dev)
2894{
2895 struct drm_i915_private *dev_priv = dev->dev_private;
2896
2897 if (HAS_PCH_NOP(dev))
2898 return;
2899
2900 WARN_ON(I915_READ(SDEIER) != 0);
2901 I915_WRITE(SDEIER, 0xffffffff);
2902 POSTING_READ(SDEIER);
2903}
2904
7c4d664e 2905static void gen5_gt_irq_reset(struct drm_device *dev)
2906{
2907 struct drm_i915_private *dev_priv = dev->dev_private;
2908
f86f3fb0 2909 GEN5_IRQ_RESET(GT);
a9d356a6 2910 if (INTEL_INFO(dev)->gen >= 6)
f86f3fb0 2911 GEN5_IRQ_RESET(GEN6_PM);
2912}
2913
2914/* drm_dma.h hooks
2915*/
be30b29f 2916static void ironlake_irq_reset(struct drm_device *dev)
036a4a7d 2917{
2d1013dd 2918 struct drm_i915_private *dev_priv = dev->dev_private;
036a4a7d 2919
0c841212 2920 I915_WRITE(HWSTAM, 0xffffffff);
bdfcdb63 2921
f86f3fb0 2922 GEN5_IRQ_RESET(DE);
2923 if (IS_GEN7(dev))
2924 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
036a4a7d 2925
7c4d664e 2926 gen5_gt_irq_reset(dev);
c650156a 2927
1c69eb42 2928 ibx_irq_reset(dev);
7d99163d 2929}
c650156a 2930
2931static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2932{
2933 enum pipe pipe;
2934
2935 I915_WRITE(PORT_HOTPLUG_EN, 0);
2936 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2937
2938 for_each_pipe(dev_priv, pipe)
2939 I915_WRITE(PIPESTAT(pipe), 0xffff);
2940
2941 GEN5_IRQ_RESET(VLV_);
2942}
2943
2944static void valleyview_irq_preinstall(struct drm_device *dev)
2945{
2d1013dd 2946 struct drm_i915_private *dev_priv = dev->dev_private;
7e231dbe 2947
2948 /* VLV magic */
2949 I915_WRITE(VLV_IMR, 0);
2950 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2951 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2952 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2953
7c4d664e 2954 gen5_gt_irq_reset(dev);
7e231dbe 2955
7c4cde39 2956 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
7e231dbe 2957
70591a41 2958 vlv_display_irq_reset(dev_priv);
2959}
2960
2961static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
2962{
2963 GEN8_IRQ_RESET_NDX(GT, 0);
2964 GEN8_IRQ_RESET_NDX(GT, 1);
2965 GEN8_IRQ_RESET_NDX(GT, 2);
2966 GEN8_IRQ_RESET_NDX(GT, 3);
2967}
2968
823f6b38 2969static void gen8_irq_reset(struct drm_device *dev)
2970{
2971 struct drm_i915_private *dev_priv = dev->dev_private;
2972 int pipe;
2973
2974 I915_WRITE(GEN8_MASTER_IRQ, 0);
2975 POSTING_READ(GEN8_MASTER_IRQ);
2976
d6e3cca3 2977 gen8_gt_irq_reset(dev_priv);
abd58f01 2978
055e393f 2979 for_each_pipe(dev_priv, pipe)
2980 if (intel_display_power_is_enabled(dev_priv,
2981 POWER_DOMAIN_PIPE(pipe)))
813bde43 2982 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
abd58f01 2983
2984 GEN5_IRQ_RESET(GEN8_DE_PORT_);
2985 GEN5_IRQ_RESET(GEN8_DE_MISC_);
2986 GEN5_IRQ_RESET(GEN8_PCU_);
abd58f01 2987
2988 if (HAS_PCH_SPLIT(dev))
2989 ibx_irq_reset(dev);
abd58f01 2990}
09f2344d 2991
2992void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
2993 unsigned int pipe_mask)
d49bdb0e 2994{
1180e206 2995 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
d49bdb0e 2996
13321786 2997 spin_lock_irq(&dev_priv->irq_lock);
2998 if (pipe_mask & 1 << PIPE_A)
2999 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3000 dev_priv->de_irq_mask[PIPE_A],
3001 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3002 if (pipe_mask & 1 << PIPE_B)
3003 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3004 dev_priv->de_irq_mask[PIPE_B],
3005 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3006 if (pipe_mask & 1 << PIPE_C)
3007 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3008 dev_priv->de_irq_mask[PIPE_C],
3009 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
13321786 3010 spin_unlock_irq(&dev_priv->irq_lock);
3011}
3012
3013static void cherryview_irq_preinstall(struct drm_device *dev)
3014{
3015 struct drm_i915_private *dev_priv = dev->dev_private;
3016
3017 I915_WRITE(GEN8_MASTER_IRQ, 0);
3018 POSTING_READ(GEN8_MASTER_IRQ);
3019
d6e3cca3 3020 gen8_gt_irq_reset(dev_priv);
3021
3022 GEN5_IRQ_RESET(GEN8_PCU_);
3023
3024 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3025
70591a41 3026 vlv_display_irq_reset(dev_priv);
43f328d7
VS
3027}
3028
82a28bcf 3029static void ibx_hpd_irq_setup(struct drm_device *dev)
7fe0b973 3030{
2d1013dd 3031 struct drm_i915_private *dev_priv = dev->dev_private;
82a28bcf 3032 struct intel_encoder *intel_encoder;
fee884ed 3033 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3034
3035 if (HAS_PCH_IBX(dev)) {
fee884ed 3036 hotplug_irqs = SDE_HOTPLUG_MASK;
b2784e15 3037 for_each_intel_encoder(dev, intel_encoder)
5fcece80 3038 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
fee884ed 3039 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3040 } else if (HAS_PCH_SPT(dev)) {
3041 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3042 for_each_intel_encoder(dev, intel_encoder)
3043 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3044 enabled_irqs |= hpd_spt[intel_encoder->hpd_pin];
82a28bcf 3045 } else {
fee884ed 3046 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
b2784e15 3047 for_each_intel_encoder(dev, intel_encoder)
5fcece80 3048 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
fee884ed 3049 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
82a28bcf 3050 }
7fe0b973 3051
fee884ed 3052 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3053
3054 /*
3055 * Enable digital hotplug on the PCH, and configure the DP short pulse
3056 * duration to 2ms (which is the minimum in the Display Port spec)
3057 *
3058 * This register is the same on all known PCH chips.
3059 */
3060 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3061 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3062 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3063 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3064 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3065 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3066
3067 /* enable SPT PORTE hot plug */
3068 if (HAS_PCH_SPT(dev)) {
3069 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3070 hotplug |= PORTE_HOTPLUG_ENABLE;
3071 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3072 }
3073}
3074
3075static void bxt_hpd_irq_setup(struct drm_device *dev)
3076{
3077 struct drm_i915_private *dev_priv = dev->dev_private;
3078 struct intel_encoder *intel_encoder;
3079 u32 hotplug_port = 0;
3080 u32 hotplug_ctrl;
3081
3082 /* Now, enable HPD */
3083 for_each_intel_encoder(dev, intel_encoder) {
5fcece80 3084 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
3085 == HPD_ENABLED)
3086 hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
3087 }
3088
3089 /* Mask all HPD control bits */
3090 hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;
3091
3092 /* Enable requested port in hotplug control */
3093 /* TODO: implement (short) HPD support on port A */
3094 WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA);
3095 if (hotplug_port & BXT_DE_PORT_HP_DDIB)
3096 hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
3097 if (hotplug_port & BXT_DE_PORT_HP_DDIC)
3098 hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
3099 I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);
3100
3101 /* Unmask DDI hotplug in IMR */
3102 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
3103 I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);
3104
3105 /* Enable DDI hotplug in IER */
3106 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
3107 I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
3108 POSTING_READ(GEN8_DE_PORT_IER);
3109}
3110
3111static void ibx_irq_postinstall(struct drm_device *dev)
3112{
2d1013dd 3113 struct drm_i915_private *dev_priv = dev->dev_private;
82a28bcf 3114 u32 mask;
e5868a31 3115
3116 if (HAS_PCH_NOP(dev))
3117 return;
3118
105b122e 3119 if (HAS_PCH_IBX(dev))
5c673b60 3120 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
105b122e 3121 else
5c673b60 3122 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
8664281b 3123
337ba017 3124 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
d46da437 3125 I915_WRITE(SDEIMR, ~mask);
3126}
3127
3128static void gen5_gt_irq_postinstall(struct drm_device *dev)
3129{
3130 struct drm_i915_private *dev_priv = dev->dev_private;
3131 u32 pm_irqs, gt_irqs;
3132
3133 pm_irqs = gt_irqs = 0;
3134
3135 dev_priv->gt_irq_mask = ~0;
040d2baa 3136 if (HAS_L3_DPF(dev)) {
0a9a8c91 3137 /* L3 parity interrupt is always unmasked. */
3138 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3139 gt_irqs |= GT_PARITY_ERROR(dev);
3140 }
3141
3142 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3143 if (IS_GEN5(dev)) {
3144 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3145 ILK_BSD_USER_INTERRUPT;
3146 } else {
3147 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3148 }
3149
35079899 3150 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3151
3152 if (INTEL_INFO(dev)->gen >= 6) {
3153 /*
3154 * RPS interrupts will get enabled/disabled on demand when RPS
3155 * itself is enabled/disabled.
3156 */
3157 if (HAS_VEBOX(dev))
3158 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3159
605cd25b 3160 dev_priv->pm_irq_mask = 0xffffffff;
35079899 3161 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3162 }
3163}
3164
f71d4af4 3165static int ironlake_irq_postinstall(struct drm_device *dev)
036a4a7d 3166{
2d1013dd 3167 struct drm_i915_private *dev_priv = dev->dev_private;
3168 u32 display_mask, extra_mask;
3169
3170 if (INTEL_INFO(dev)->gen >= 7) {
3171 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3172 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3173 DE_PLANEB_FLIP_DONE_IVB |
5c673b60 3174 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
8e76f8dc 3175 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
5c673b60 3176 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3177 } else {
3178 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3179 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
5b3a856b 3180 DE_AUX_CHANNEL_A |
3181 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3182 DE_POISON);
3183 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3184 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
8e76f8dc 3185 }
036a4a7d 3186
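 /*
  * Note (added): IMR bits are masks, i.e. a set bit disables delivery,
  * so irq_mask is kept as the complement of display_mask and handed to
  * GEN5_IRQ_INIT() below together with the bits to unmask.
  */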
1ec14ad3 3187 dev_priv->irq_mask = ~display_mask;
036a4a7d 3188
3189 I915_WRITE(HWSTAM, 0xeffe);
3190
3191 ibx_irq_pre_postinstall(dev);
3192
35079899 3193 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
036a4a7d 3194
0a9a8c91 3195 gen5_gt_irq_postinstall(dev);
036a4a7d 3196
d46da437 3197 ibx_irq_postinstall(dev);
7fe0b973 3198
f97108d1 3199 if (IS_IRONLAKE_M(dev)) {
6005ce42
DV
3200 /* Enable PCU event interrupts
3201 *
3202 * spinlocking not required here for correctness since interrupt
4bc9d430
DV
3203 * setup is guaranteed to run in single-threaded context. But we
3204 * need it to make the assert_spin_locked happy. */
d6207435 3205 spin_lock_irq(&dev_priv->irq_lock);
f97108d1 3206 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
d6207435 3207 spin_unlock_irq(&dev_priv->irq_lock);
f97108d1
JB
3208 }
3209
036a4a7d
ZW
3210 return 0;
3211}
3212
f8b79e58
ID
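/*
 * Bring up the VLV/CHV display-side interrupts: clear any stale PIPESTAT
 * status, enable the per-pipe and GMBUS pipestat events, then unmask the
 * pipe/port events in VLV_IER/VLV_IMR.
 */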
3213static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3214{
3215 u32 pipestat_mask;
3216 u32 iir_mask;
120dda4f 3217 enum pipe pipe;
f8b79e58
ID
3218
3219 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3220 PIPE_FIFO_UNDERRUN_STATUS;
3221
120dda4f
VS
3222 for_each_pipe(dev_priv, pipe)
3223 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
f8b79e58
ID
3224 POSTING_READ(PIPESTAT(PIPE_A));
3225
3226 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3227 PIPE_CRC_DONE_INTERRUPT_STATUS;
3228
120dda4f
VS
3229 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3230 for_each_pipe(dev_priv, pipe)
3231 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
f8b79e58
ID
3232
3233 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3234 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3235 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
120dda4f
VS
3236 if (IS_CHERRYVIEW(dev_priv))
3237 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
f8b79e58
ID
3238 dev_priv->irq_mask &= ~iir_mask;
3239
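	/*
	 * Clear VLV_IIR twice: IIR can latch a second event per bit while
	 * the first is still set, so a single write may leave a stale
	 * event pending when we unmask below.
	 */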
3240 I915_WRITE(VLV_IIR, iir_mask);
3241 I915_WRITE(VLV_IIR, iir_mask);
f8b79e58 3242 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
76e41860
VS
3243 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3244 POSTING_READ(VLV_IMR);
f8b79e58
ID
3245}
3246
3247static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3248{
3249 u32 pipestat_mask;
3250 u32 iir_mask;
120dda4f 3251 enum pipe pipe;
f8b79e58
ID
3252
3253 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3254 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
6c7fba04 3255 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
120dda4f
VS
3256 if (IS_CHERRYVIEW(dev_priv))
3257 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
f8b79e58
ID
3258
3259 dev_priv->irq_mask |= iir_mask;
f8b79e58 3260 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
76e41860 3261 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
f8b79e58
ID
3262 I915_WRITE(VLV_IIR, iir_mask);
3263 I915_WRITE(VLV_IIR, iir_mask);
3264 POSTING_READ(VLV_IIR);
3265
3266 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3267 PIPE_CRC_DONE_INTERRUPT_STATUS;
3268
120dda4f
VS
3269 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3270 for_each_pipe(dev_priv, pipe)
3271 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
f8b79e58
ID
3272
3273 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3274 PIPE_FIFO_UNDERRUN_STATUS;
120dda4f
VS
3275
3276 for_each_pipe(dev_priv, pipe)
3277 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
f8b79e58
ID
3278 POSTING_READ(PIPESTAT(PIPE_A));
3279}
3280
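/*
 * Callers toggle dev_priv->display_irqs_enabled under irq_lock; the actual
 * install/uninstall of the display interrupts only happens once interrupts
 * as a whole are enabled.
 */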
3281void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3282{
3283 assert_spin_locked(&dev_priv->irq_lock);
3284
3285 if (dev_priv->display_irqs_enabled)
3286 return;
3287
3288 dev_priv->display_irqs_enabled = true;
3289
950eabaf 3290 if (intel_irqs_enabled(dev_priv))
f8b79e58
ID
3291 valleyview_display_irqs_install(dev_priv);
3292}
3293
3294void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3295{
3296 assert_spin_locked(&dev_priv->irq_lock);
3297
3298 if (!dev_priv->display_irqs_enabled)
3299 return;
3300
3301 dev_priv->display_irqs_enabled = false;
3302
950eabaf 3303 if (intel_irqs_enabled(dev_priv))
f8b79e58
ID
3304 valleyview_display_irqs_uninstall(dev_priv);
3305}
3306
0e6c9a9e 3307static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
7e231dbe 3308{
f8b79e58 3309 dev_priv->irq_mask = ~0;
7e231dbe 3310
20afbda2
DV
3311 I915_WRITE(PORT_HOTPLUG_EN, 0);
3312 POSTING_READ(PORT_HOTPLUG_EN);
3313
7e231dbe 3314 I915_WRITE(VLV_IIR, 0xffffffff);
76e41860
VS
3315 I915_WRITE(VLV_IIR, 0xffffffff);
3316 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3317 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3318 POSTING_READ(VLV_IMR);
7e231dbe 3319
b79480ba
DV
3320 /* Interrupt setup is already guaranteed to be single-threaded, this is
3321 * just to make the assert_spin_locked check happy. */
d6207435 3322 spin_lock_irq(&dev_priv->irq_lock);
f8b79e58
ID
3323 if (dev_priv->display_irqs_enabled)
3324 valleyview_display_irqs_install(dev_priv);
d6207435 3325 spin_unlock_irq(&dev_priv->irq_lock);
0e6c9a9e
VS
3326}
3327
3328static int valleyview_irq_postinstall(struct drm_device *dev)
3329{
3330 struct drm_i915_private *dev_priv = dev->dev_private;
3331
3332 vlv_display_irq_postinstall(dev_priv);
7e231dbe 3333
0a9a8c91 3334 gen5_gt_irq_postinstall(dev);
7e231dbe
JB
3335
3336 /* ack & enable invalid PTE error interrupts */
3337#if 0 /* FIXME: add support to irq handler for checking these bits */
3338 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3339 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3340#endif
3341
3342 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
20afbda2
DV
3343
3344 return 0;
3345}
3346
abd58f01
BW
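/*
 * GEN8 splits the GT interrupts across four IMR/IER banks: 0 = RCS/BCS,
 * 1 = VCS1/VCS2, 2 = PM/RPS, 3 = VECS. The gt_interrupts[] array below is
 * indexed by bank.
 */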
3347static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3348{
abd58f01
BW
3349 /* These are interrupts we'll toggle with the ring mask register */
3350 uint32_t gt_interrupts[] = {
3351 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
73d477f6 3352 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
abd58f01 3353 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
73d477f6
OM
3354 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3355 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
abd58f01 3356 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
73d477f6
OM
3357 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3358 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3359 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
abd58f01 3360 0,
73d477f6
OM
3361 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3362 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
abd58f01
BW
3363 };
3364
0961021a 3365 dev_priv->pm_irq_mask = 0xffffffff;
9a2d2d87
D
3366 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3367 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
78e68d36
ID
3368 /*
3369 * RPS interrupts will get enabled/disabled on demand when RPS itself
3370 * is enabled/disabled.
3371 */
3372 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
9a2d2d87 3373 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
abd58f01
BW
3374}
3375
3376static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3377{
770de83d
DL
3378 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3379 uint32_t de_pipe_enables;
abd58f01 3380 int pipe;
9e63743e 3381 u32 de_port_en = GEN8_AUX_CHANNEL_A;
770de83d 3382
88e04703 3383 if (IS_GEN9(dev_priv)) {
770de83d
DL
3384 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3385 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
9e63743e 3386 de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
88e04703 3387 GEN9_AUX_CHANNEL_D;
9e63743e
SS
3388
3389 if (IS_BROXTON(dev_priv))
3390 de_port_en |= BXT_DE_PORT_GMBUS;
88e04703 3391 } else
770de83d
DL
3392 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3393 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3394
3395 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3396 GEN8_PIPE_FIFO_UNDERRUN;
3397
13b3a0a7
DV
3398 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3399 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3400 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
abd58f01 3401
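	/*
	 * Only program pipes whose power well is currently enabled; the
	 * others cannot be written until their power domain comes back up.
	 */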
055e393f 3402 for_each_pipe(dev_priv, pipe)
f458ebbc 3403 if (intel_display_power_is_enabled(dev_priv,
813bde43
PZ
3404 POWER_DOMAIN_PIPE(pipe)))
3405 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3406 dev_priv->de_irq_mask[pipe],
3407 de_pipe_enables);
abd58f01 3408
9e63743e 3409 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en);
abd58f01
BW
3410}
3411
3412static int gen8_irq_postinstall(struct drm_device *dev)
3413{
3414 struct drm_i915_private *dev_priv = dev->dev_private;
3415
266ea3d9
SS
3416 if (HAS_PCH_SPLIT(dev))
3417 ibx_irq_pre_postinstall(dev);
622364b6 3418
abd58f01
BW
3419 gen8_gt_irq_postinstall(dev_priv);
3420 gen8_de_irq_postinstall(dev_priv);
3421
266ea3d9
SS
3422 if (HAS_PCH_SPLIT(dev))
3423 ibx_irq_postinstall(dev);
abd58f01
BW
3424
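	/* Enable the master control bit last, once every bank is set up. */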
3425 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3426 POSTING_READ(GEN8_MASTER_IRQ);
3427
3428 return 0;
3429}
3430
43f328d7
VS
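/*
 * Cherryview pairs the VLV-style display interrupt block with the GEN8 GT
 * interrupt layout, so its postinstall reuses both helpers.
 */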
3431static int cherryview_irq_postinstall(struct drm_device *dev)
3432{
3433 struct drm_i915_private *dev_priv = dev->dev_private;
43f328d7 3434
c2b66797 3435 vlv_display_irq_postinstall(dev_priv);
43f328d7
VS
3436
3437 gen8_gt_irq_postinstall(dev_priv);
3438
3439 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3440 POSTING_READ(GEN8_MASTER_IRQ);
3441
3442 return 0;
3443}
3444
abd58f01
BW
3445static void gen8_irq_uninstall(struct drm_device *dev)
3446{
3447 struct drm_i915_private *dev_priv = dev->dev_private;
abd58f01
BW
3448
3449 if (!dev_priv)
3450 return;
3451
823f6b38 3452 gen8_irq_reset(dev);
abd58f01
BW
3453}
3454
8ea0be4f
VS
3455static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3456{
3457 /* Interrupt setup is already guaranteed to be single-threaded, this is
3458 * just to make the assert_spin_locked check happy. */
3459 spin_lock_irq(&dev_priv->irq_lock);
3460 if (dev_priv->display_irqs_enabled)
3461 valleyview_display_irqs_uninstall(dev_priv);
3462 spin_unlock_irq(&dev_priv->irq_lock);
3463
3464 vlv_display_irq_reset(dev_priv);
3465
c352d1ba 3466 dev_priv->irq_mask = ~0;
8ea0be4f
VS
3467}
3468
7e231dbe
JB
3469static void valleyview_irq_uninstall(struct drm_device *dev)
3470{
2d1013dd 3471 struct drm_i915_private *dev_priv = dev->dev_private;
7e231dbe
JB
3472
3473 if (!dev_priv)
3474 return;
3475
843d0e7d
ID
3476 I915_WRITE(VLV_MASTER_IER, 0);
3477
893fce8e
VS
3478 gen5_gt_irq_reset(dev);
3479
7e231dbe 3480 I915_WRITE(HWSTAM, 0xffffffff);
f8b79e58 3481
8ea0be4f 3482 vlv_display_irq_uninstall(dev_priv);
7e231dbe
JB
3483}
3484
43f328d7
VS
3485static void cherryview_irq_uninstall(struct drm_device *dev)
3486{
3487 struct drm_i915_private *dev_priv = dev->dev_private;
43f328d7
VS
3488
3489 if (!dev_priv)
3490 return;
3491
3492 I915_WRITE(GEN8_MASTER_IRQ, 0);
3493 POSTING_READ(GEN8_MASTER_IRQ);
3494
a2c30fba 3495 gen8_gt_irq_reset(dev_priv);
43f328d7 3496
a2c30fba 3497 GEN5_IRQ_RESET(GEN8_PCU_);
43f328d7 3498
c2b66797 3499 vlv_display_irq_uninstall(dev_priv);
43f328d7
VS
3500}
3501
f71d4af4 3502static void ironlake_irq_uninstall(struct drm_device *dev)
036a4a7d 3503{
2d1013dd 3504 struct drm_i915_private *dev_priv = dev->dev_private;
4697995b
JB
3505
3506 if (!dev_priv)
3507 return;
3508
be30b29f 3509 ironlake_irq_reset(dev);
036a4a7d
ZW
3510}
3511
a266c7d5 3512static void i8xx_irq_preinstall(struct drm_device * dev)
1da177e4 3513{
2d1013dd 3514 struct drm_i915_private *dev_priv = dev->dev_private;
9db4a9c7 3515 int pipe;
91e3738e 3516
055e393f 3517 for_each_pipe(dev_priv, pipe)
9db4a9c7 3518 I915_WRITE(PIPESTAT(pipe), 0);
a266c7d5
CW
3519 I915_WRITE16(IMR, 0xffff);
3520 I915_WRITE16(IER, 0x0);
3521 POSTING_READ16(IER);
c2798b19
CW
3522}
3523
3524static int i8xx_irq_postinstall(struct drm_device *dev)
3525{
2d1013dd 3526 struct drm_i915_private *dev_priv = dev->dev_private;
c2798b19 3527
c2798b19
CW
3528 I915_WRITE16(EMR,
3529 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3530
3531 /* Unmask the interrupts that we always want on. */
3532 dev_priv->irq_mask =
3533 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3534 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3535 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
37ef01ab 3536 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
c2798b19
CW
3537 I915_WRITE16(IMR, dev_priv->irq_mask);
3538
3539 I915_WRITE16(IER,
3540 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3541 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
c2798b19
CW
3542 I915_USER_INTERRUPT);
3543 POSTING_READ16(IER);
3544
379ef82d
DV
3545 /* Interrupt setup is already guaranteed to be single-threaded, this is
3546 * just to make the assert_spin_locked check happy. */
d6207435 3547 spin_lock_irq(&dev_priv->irq_lock);
755e9019
ID
3548 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3549 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
d6207435 3550 spin_unlock_irq(&dev_priv->irq_lock);
379ef82d 3551
c2798b19
CW
3552 return 0;
3553}
3554
90a72f87
VS
3555/*
3556 * Returns true when a page flip has completed.
3557 */
3558static bool i8xx_handle_vblank(struct drm_device *dev,
1f1c2e24 3559 int plane, int pipe, u32 iir)
90a72f87 3560{
2d1013dd 3561 struct drm_i915_private *dev_priv = dev->dev_private;
1f1c2e24 3562 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
90a72f87 3563
8d7849db 3564 if (!intel_pipe_handle_vblank(dev, pipe))
90a72f87
VS
3565 return false;
3566
3567 if ((iir & flip_pending) == 0)
d6bbafa1 3568 goto check_page_flip;
90a72f87 3569
90a72f87
VS
3570 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3571 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3572 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3573 * the flip is completed (no longer pending). Since this doesn't raise
3574 * an interrupt per se, we watch for the change at vblank.
3575 */
3576 if (I915_READ16(ISR) & flip_pending)
d6bbafa1 3577 goto check_page_flip;
90a72f87 3578
7d47559e 3579 intel_prepare_page_flip(dev, plane);
90a72f87 3580 intel_finish_page_flip(dev, pipe);
90a72f87 3581 return true;
d6bbafa1
CW
3582
3583check_page_flip:
3584 intel_check_page_flip(dev, pipe);
3585 return false;
90a72f87
VS
3586}
3587
ff1f525e 3588static irqreturn_t i8xx_irq_handler(int irq, void *arg)
c2798b19 3589{
45a83f84 3590 struct drm_device *dev = arg;
2d1013dd 3591 struct drm_i915_private *dev_priv = dev->dev_private;
c2798b19
CW
3592 u16 iir, new_iir;
3593 u32 pipe_stats[2];
c2798b19
CW
3594 int pipe;
3595 u16 flip_mask =
3596 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3597 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3598
2dd2a883
ID
3599 if (!intel_irqs_enabled(dev_priv))
3600 return IRQ_NONE;
3601
c2798b19
CW
3602 iir = I915_READ16(IIR);
3603 if (iir == 0)
3604 return IRQ_NONE;
3605
3606 while (iir & ~flip_mask) {
3607 /* Can't rely on pipestat interrupt bit in iir as it might
3608 * have been cleared after the pipestat interrupt was received.
3609 * It doesn't set the bit in iir again, but it still produces
3610 * interrupts (for non-MSI).
3611 */
222c7f51 3612 spin_lock(&dev_priv->irq_lock);
c2798b19 3613 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
aaecdf61 3614 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
c2798b19 3615
055e393f 3616 for_each_pipe(dev_priv, pipe) {
c2798b19
CW
3617 int reg = PIPESTAT(pipe);
3618 pipe_stats[pipe] = I915_READ(reg);
3619
3620 /*
3621 * Clear the PIPE*STAT regs before the IIR
3622 */
2d9d2b0b 3623 if (pipe_stats[pipe] & 0x8000ffff)
c2798b19 3624 I915_WRITE(reg, pipe_stats[pipe]);
c2798b19 3625 }
222c7f51 3626 spin_unlock(&dev_priv->irq_lock);
c2798b19
CW
3627
3628 I915_WRITE16(IIR, iir & ~flip_mask);
3629 new_iir = I915_READ16(IIR); /* Flush posted writes */
3630
c2798b19 3631 if (iir & I915_USER_INTERRUPT)
74cdb337 3632 notify_ring(&dev_priv->ring[RCS]);
c2798b19 3633
055e393f 3634 for_each_pipe(dev_priv, pipe) {
1f1c2e24 3635 int plane = pipe;
3a77c4c4 3636 if (HAS_FBC(dev))
1f1c2e24
VS
3637 plane = !plane;
3638
4356d586 3639 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
1f1c2e24
VS
3640 i8xx_handle_vblank(dev, plane, pipe, iir))
3641 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
c2798b19 3642
4356d586 3643 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
277de95e 3644 i9xx_pipe_crc_irq_handler(dev, pipe);
2d9d2b0b 3645
1f7247c0
DV
3646 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3647 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3648 pipe);
4356d586 3649 }
c2798b19
CW
3650
3651 iir = new_iir;
3652 }
3653
3654 return IRQ_HANDLED;
3655}
3656
3657static void i8xx_irq_uninstall(struct drm_device * dev)
3658{
2d1013dd 3659 struct drm_i915_private *dev_priv = dev->dev_private;
c2798b19
CW
3660 int pipe;
3661
055e393f 3662 for_each_pipe(dev_priv, pipe) {
c2798b19
CW
3663 /* Clear enable bits; then clear status bits */
3664 I915_WRITE(PIPESTAT(pipe), 0);
3665 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3666 }
3667 I915_WRITE16(IMR, 0xffff);
3668 I915_WRITE16(IER, 0x0);
3669 I915_WRITE16(IIR, I915_READ16(IIR));
3670}
3671
a266c7d5
CW
3672static void i915_irq_preinstall(struct drm_device * dev)
3673{
2d1013dd 3674 struct drm_i915_private *dev_priv = dev->dev_private;
a266c7d5
CW
3675 int pipe;
3676
a266c7d5
CW
3677 if (I915_HAS_HOTPLUG(dev)) {
3678 I915_WRITE(PORT_HOTPLUG_EN, 0);
3679 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3680 }
3681
00d98ebd 3682 I915_WRITE16(HWSTAM, 0xeffe);
055e393f 3683 for_each_pipe(dev_priv, pipe)
a266c7d5
CW
3684 I915_WRITE(PIPESTAT(pipe), 0);
3685 I915_WRITE(IMR, 0xffffffff);
3686 I915_WRITE(IER, 0x0);
3687 POSTING_READ(IER);
3688}
3689
3690static int i915_irq_postinstall(struct drm_device *dev)
3691{
2d1013dd 3692 struct drm_i915_private *dev_priv = dev->dev_private;
38bde180 3693 u32 enable_mask;
a266c7d5 3694
38bde180
CW
3695 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3696
3697 /* Unmask the interrupts that we always want on. */
3698 dev_priv->irq_mask =
3699 ~(I915_ASLE_INTERRUPT |
3700 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3701 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3702 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
37ef01ab 3703 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
38bde180
CW
3704
3705 enable_mask =
3706 I915_ASLE_INTERRUPT |
3707 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3708 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
38bde180
CW
3709 I915_USER_INTERRUPT;
3710
a266c7d5 3711 if (I915_HAS_HOTPLUG(dev)) {
20afbda2
DV
3712 I915_WRITE(PORT_HOTPLUG_EN, 0);
3713 POSTING_READ(PORT_HOTPLUG_EN);
3714
a266c7d5
CW
3715 /* Enable in IER... */
3716 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3717 /* and unmask in IMR */
3718 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3719 }
3720
a266c7d5
CW
3721 I915_WRITE(IMR, dev_priv->irq_mask);
3722 I915_WRITE(IER, enable_mask);
3723 POSTING_READ(IER);
3724
f49e38dd 3725 i915_enable_asle_pipestat(dev);
20afbda2 3726
379ef82d
DV
3727 /* Interrupt setup is already guaranteed to be single-threaded, this is
3728 * just to make the assert_spin_locked check happy. */
d6207435 3729 spin_lock_irq(&dev_priv->irq_lock);
755e9019
ID
3730 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3731 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
d6207435 3732 spin_unlock_irq(&dev_priv->irq_lock);
379ef82d 3733
20afbda2
DV
3734 return 0;
3735}
3736
90a72f87
VS
3737/*
3738 * Returns true when a page flip has completed.
3739 */
3740static bool i915_handle_vblank(struct drm_device *dev,
3741 int plane, int pipe, u32 iir)
3742{
2d1013dd 3743 struct drm_i915_private *dev_priv = dev->dev_private;
90a72f87
VS
3744 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3745
8d7849db 3746 if (!intel_pipe_handle_vblank(dev, pipe))
90a72f87
VS
3747 return false;
3748
3749 if ((iir & flip_pending) == 0)
d6bbafa1 3750 goto check_page_flip;
90a72f87 3751
90a72f87
VS
3752 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3753 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3754 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3755 * the flip is completed (no longer pending). Since this doesn't raise
3756 * an interrupt per se, we watch for the change at vblank.
3757 */
3758 if (I915_READ(ISR) & flip_pending)
d6bbafa1 3759 goto check_page_flip;
90a72f87 3760
7d47559e 3761 intel_prepare_page_flip(dev, plane);
90a72f87 3762 intel_finish_page_flip(dev, pipe);
90a72f87 3763 return true;
d6bbafa1
CW
3764
3765check_page_flip:
3766 intel_check_page_flip(dev, pipe);
3767 return false;
90a72f87
VS
3768}
3769
ff1f525e 3770static irqreturn_t i915_irq_handler(int irq, void *arg)
a266c7d5 3771{
45a83f84 3772 struct drm_device *dev = arg;
2d1013dd 3773 struct drm_i915_private *dev_priv = dev->dev_private;
8291ee90 3774 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
38bde180
CW
3775 u32 flip_mask =
3776 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3777 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
38bde180 3778 int pipe, ret = IRQ_NONE;
a266c7d5 3779
2dd2a883
ID
3780 if (!intel_irqs_enabled(dev_priv))
3781 return IRQ_NONE;
3782
a266c7d5 3783 iir = I915_READ(IIR);
38bde180
CW
3784 do {
3785 bool irq_received = (iir & ~flip_mask) != 0;
8291ee90 3786 bool blc_event = false;
a266c7d5
CW
3787
3788 /* Can't rely on pipestat interrupt bit in iir as it might
3789 * have been cleared after the pipestat interrupt was received.
3790 * It doesn't set the bit in iir again, but it still produces
3791 * interrupts (for non-MSI).
3792 */
222c7f51 3793 spin_lock(&dev_priv->irq_lock);
a266c7d5 3794 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
aaecdf61 3795 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
a266c7d5 3796
055e393f 3797 for_each_pipe(dev_priv, pipe) {
a266c7d5
CW
3798 int reg = PIPESTAT(pipe);
3799 pipe_stats[pipe] = I915_READ(reg);
3800
38bde180 3801 /* Clear the PIPE*STAT regs before the IIR */
a266c7d5 3802 if (pipe_stats[pipe] & 0x8000ffff) {
a266c7d5 3803 I915_WRITE(reg, pipe_stats[pipe]);
38bde180 3804 irq_received = true;
a266c7d5
CW
3805 }
3806 }
222c7f51 3807 spin_unlock(&dev_priv->irq_lock);
a266c7d5
CW
3808
3809 if (!irq_received)
3810 break;
3811
a266c7d5 3812 /* Consume port. Then clear IIR or we'll miss events */
16c6c56b
VS
3813 if (I915_HAS_HOTPLUG(dev) &&
3814 iir & I915_DISPLAY_PORT_INTERRUPT)
3815 i9xx_hpd_irq_handler(dev);
a266c7d5 3816
38bde180 3817 I915_WRITE(IIR, iir & ~flip_mask);
a266c7d5
CW
3818 new_iir = I915_READ(IIR); /* Flush posted writes */
3819
a266c7d5 3820 if (iir & I915_USER_INTERRUPT)
74cdb337 3821 notify_ring(&dev_priv->ring[RCS]);
a266c7d5 3822
055e393f 3823 for_each_pipe(dev_priv, pipe) {
38bde180 3824 int plane = pipe;
3a77c4c4 3825 if (HAS_FBC(dev))
38bde180 3826 plane = !plane;
90a72f87 3827
8291ee90 3828 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
90a72f87
VS
3829 i915_handle_vblank(dev, plane, pipe, iir))
3830 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
a266c7d5
CW
3831
3832 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3833 blc_event = true;
4356d586
DV
3834
3835 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
277de95e 3836 i9xx_pipe_crc_irq_handler(dev, pipe);
2d9d2b0b 3837
1f7247c0
DV
3838 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3839 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3840 pipe);
a266c7d5
CW
3841 }
3842
a266c7d5
CW
3843 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3844 intel_opregion_asle_intr(dev);
3845
3846 /* With MSI, interrupts are only generated when iir
3847 * transitions from zero to nonzero. If another bit got
3848 * set while we were handling the existing iir bits, then
3849 * we would never get another interrupt.
3850 *
3851 * This is fine on non-MSI as well, as if we hit this path
3852 * we avoid exiting the interrupt handler only to generate
3853 * another one.
3854 *
3855 * Note that for MSI this could cause a stray interrupt report
3856 * if an interrupt landed in the time between writing IIR and
3857 * the posting read. This should be rare enough to never
3858 * trigger the 99% of 100,000 interrupts test for disabling
3859 * stray interrupts.
3860 */
38bde180 3861 ret = IRQ_HANDLED;
a266c7d5 3862 iir = new_iir;
38bde180 3863 } while (iir & ~flip_mask);
a266c7d5
CW
3864
3865 return ret;
3866}
3867
3868static void i915_irq_uninstall(struct drm_device * dev)
3869{
2d1013dd 3870 struct drm_i915_private *dev_priv = dev->dev_private;
a266c7d5
CW
3871 int pipe;
3872
a266c7d5
CW
3873 if (I915_HAS_HOTPLUG(dev)) {
3874 I915_WRITE(PORT_HOTPLUG_EN, 0);
3875 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3876 }
3877
00d98ebd 3878 I915_WRITE16(HWSTAM, 0xffff);
055e393f 3879 for_each_pipe(dev_priv, pipe) {
55b39755 3880 /* Clear enable bits; then clear status bits */
a266c7d5 3881 I915_WRITE(PIPESTAT(pipe), 0);
55b39755
CW
3882 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3883 }
a266c7d5
CW
3884 I915_WRITE(IMR, 0xffffffff);
3885 I915_WRITE(IER, 0x0);
3886
a266c7d5
CW
3887 I915_WRITE(IIR, I915_READ(IIR));
3888}
3889
3890static void i965_irq_preinstall(struct drm_device * dev)
3891{
2d1013dd 3892 struct drm_i915_private *dev_priv = dev->dev_private;
a266c7d5
CW
3893 int pipe;
3894
adca4730
CW
3895 I915_WRITE(PORT_HOTPLUG_EN, 0);
3896 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
a266c7d5
CW
3897
3898 I915_WRITE(HWSTAM, 0xeffe);
055e393f 3899 for_each_pipe(dev_priv, pipe)
a266c7d5
CW
3900 I915_WRITE(PIPESTAT(pipe), 0);
3901 I915_WRITE(IMR, 0xffffffff);
3902 I915_WRITE(IER, 0x0);
3903 POSTING_READ(IER);
3904}
3905
3906static int i965_irq_postinstall(struct drm_device *dev)
3907{
2d1013dd 3908 struct drm_i915_private *dev_priv = dev->dev_private;
bbba0a97 3909 u32 enable_mask;
a266c7d5
CW
3910 u32 error_mask;
3911
a266c7d5 3912 /* Unmask the interrupts that we always want on. */
bbba0a97 3913 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
adca4730 3914 I915_DISPLAY_PORT_INTERRUPT |
bbba0a97
CW
3915 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3916 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3917 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3918 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3919 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3920
3921 enable_mask = ~dev_priv->irq_mask;
21ad8330
VS
3922 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3923 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
bbba0a97
CW
3924 enable_mask |= I915_USER_INTERRUPT;
3925
3926 if (IS_G4X(dev))
3927 enable_mask |= I915_BSD_USER_INTERRUPT;
a266c7d5 3928
b79480ba
DV
3929 /* Interrupt setup is already guaranteed to be single-threaded, this is
3930 * just to make the assert_spin_locked check happy. */
d6207435 3931 spin_lock_irq(&dev_priv->irq_lock);
755e9019
ID
3932 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3933 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3934 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
d6207435 3935 spin_unlock_irq(&dev_priv->irq_lock);
a266c7d5 3936
a266c7d5
CW
3937 /*
3938 * Enable some error detection, note the instruction error mask
3939 * bit is reserved, so we leave it masked.
3940 */
3941 if (IS_G4X(dev)) {
3942 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3943 GM45_ERROR_MEM_PRIV |
3944 GM45_ERROR_CP_PRIV |
3945 I915_ERROR_MEMORY_REFRESH);
3946 } else {
3947 error_mask = ~(I915_ERROR_PAGE_TABLE |
3948 I915_ERROR_MEMORY_REFRESH);
3949 }
3950 I915_WRITE(EMR, error_mask);
3951
3952 I915_WRITE(IMR, dev_priv->irq_mask);
3953 I915_WRITE(IER, enable_mask);
3954 POSTING_READ(IER);
3955
20afbda2
DV
3956 I915_WRITE(PORT_HOTPLUG_EN, 0);
3957 POSTING_READ(PORT_HOTPLUG_EN);
3958
f49e38dd 3959 i915_enable_asle_pipestat(dev);
20afbda2
DV
3960
3961 return 0;
3962}
3963
bac56d5b 3964static void i915_hpd_irq_setup(struct drm_device *dev)
20afbda2 3965{
2d1013dd 3966 struct drm_i915_private *dev_priv = dev->dev_private;
cd569aed 3967 struct intel_encoder *intel_encoder;
20afbda2
DV
3968 u32 hotplug_en;
3969
b5ea2d56
DV
3970 assert_spin_locked(&dev_priv->irq_lock);
3971
778eb334
VS
3972 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3973 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3974 /* Note HDMI and DP share hotplug bits */
3975 /* enable bits are the same for all generations */
3976 for_each_intel_encoder(dev, intel_encoder)
5fcece80 3977 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
778eb334
VS
3978 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
 3979 /* Programming the CRT detection parameters tends to generate
 3980 * a spurious hotplug event about three seconds later, so just
 3981 * do it once.
 3982 */
3983 if (IS_G4X(dev))
3984 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3985 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3986 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3987
3988 /* Ignore TV since it's buggy */
3989 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
a266c7d5
CW
3990}
3991
ff1f525e 3992static irqreturn_t i965_irq_handler(int irq, void *arg)
a266c7d5 3993{
45a83f84 3994 struct drm_device *dev = arg;
2d1013dd 3995 struct drm_i915_private *dev_priv = dev->dev_private;
a266c7d5
CW
3996 u32 iir, new_iir;
3997 u32 pipe_stats[I915_MAX_PIPES];
a266c7d5 3998 int ret = IRQ_NONE, pipe;
21ad8330
VS
3999 u32 flip_mask =
4000 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4001 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
a266c7d5 4002
2dd2a883
ID
4003 if (!intel_irqs_enabled(dev_priv))
4004 return IRQ_NONE;
4005
a266c7d5
CW
4006 iir = I915_READ(IIR);
4007
a266c7d5 4008 for (;;) {
501e01d7 4009 bool irq_received = (iir & ~flip_mask) != 0;
2c8ba29f
CW
4010 bool blc_event = false;
4011
a266c7d5
CW
4012 /* Can't rely on pipestat interrupt bit in iir as it might
4013 * have been cleared after the pipestat interrupt was received.
4014 * It doesn't set the bit in iir again, but it still produces
4015 * interrupts (for non-MSI).
4016 */
222c7f51 4017 spin_lock(&dev_priv->irq_lock);
a266c7d5 4018 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
aaecdf61 4019 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
a266c7d5 4020
055e393f 4021 for_each_pipe(dev_priv, pipe) {
a266c7d5
CW
4022 int reg = PIPESTAT(pipe);
4023 pipe_stats[pipe] = I915_READ(reg);
4024
4025 /*
4026 * Clear the PIPE*STAT regs before the IIR
4027 */
4028 if (pipe_stats[pipe] & 0x8000ffff) {
a266c7d5 4029 I915_WRITE(reg, pipe_stats[pipe]);
501e01d7 4030 irq_received = true;
a266c7d5
CW
4031 }
4032 }
222c7f51 4033 spin_unlock(&dev_priv->irq_lock);
a266c7d5
CW
4034
4035 if (!irq_received)
4036 break;
4037
4038 ret = IRQ_HANDLED;
4039
4040 /* Consume port. Then clear IIR or we'll miss events */
16c6c56b
VS
4041 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4042 i9xx_hpd_irq_handler(dev);
a266c7d5 4043
21ad8330 4044 I915_WRITE(IIR, iir & ~flip_mask);
a266c7d5
CW
4045 new_iir = I915_READ(IIR); /* Flush posted writes */
4046
a266c7d5 4047 if (iir & I915_USER_INTERRUPT)
74cdb337 4048 notify_ring(&dev_priv->ring[RCS]);
a266c7d5 4049 if (iir & I915_BSD_USER_INTERRUPT)
74cdb337 4050 notify_ring(&dev_priv->ring[VCS]);
a266c7d5 4051
055e393f 4052 for_each_pipe(dev_priv, pipe) {
2c8ba29f 4053 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
90a72f87
VS
4054 i915_handle_vblank(dev, pipe, pipe, iir))
4055 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
a266c7d5
CW
4056
4057 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4058 blc_event = true;
4356d586
DV
4059
4060 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
277de95e 4061 i9xx_pipe_crc_irq_handler(dev, pipe);
a266c7d5 4062
1f7247c0
DV
4063 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4064 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2d9d2b0b 4065 }
a266c7d5
CW
4066
4067 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4068 intel_opregion_asle_intr(dev);
4069
515ac2bb
DV
4070 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4071 gmbus_irq_handler(dev);
4072
a266c7d5
CW
4073 /* With MSI, interrupts are only generated when iir
4074 * transitions from zero to nonzero. If another bit got
4075 * set while we were handling the existing iir bits, then
4076 * we would never get another interrupt.
4077 *
4078 * This is fine on non-MSI as well, as if we hit this path
4079 * we avoid exiting the interrupt handler only to generate
4080 * another one.
4081 *
4082 * Note that for MSI this could cause a stray interrupt report
4083 * if an interrupt landed in the time between writing IIR and
4084 * the posting read. This should be rare enough to never
4085 * trigger the 99% of 100,000 interrupts test for disabling
4086 * stray interrupts.
4087 */
4088 iir = new_iir;
4089 }
4090
4091 return ret;
4092}
4093
4094static void i965_irq_uninstall(struct drm_device * dev)
4095{
2d1013dd 4096 struct drm_i915_private *dev_priv = dev->dev_private;
a266c7d5
CW
4097 int pipe;
4098
4099 if (!dev_priv)
4100 return;
4101
adca4730
CW
4102 I915_WRITE(PORT_HOTPLUG_EN, 0);
4103 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
a266c7d5
CW
4104
4105 I915_WRITE(HWSTAM, 0xffffffff);
055e393f 4106 for_each_pipe(dev_priv, pipe)
a266c7d5
CW
4107 I915_WRITE(PIPESTAT(pipe), 0);
4108 I915_WRITE(IMR, 0xffffffff);
4109 I915_WRITE(IER, 0x0);
4110
055e393f 4111 for_each_pipe(dev_priv, pipe)
a266c7d5
CW
4112 I915_WRITE(PIPESTAT(pipe),
4113 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4114 I915_WRITE(IIR, I915_READ(IIR));
4115}
4116
fca52a55
DV
4117/**
4118 * intel_irq_init - initializes irq support
4119 * @dev_priv: i915 device instance
4120 *
4121 * This function initializes all the irq support including work items, timers
 4122 * and all the vtables. It does not set up the interrupt itself, though.
4123 */
b963291c 4124void intel_irq_init(struct drm_i915_private *dev_priv)
f71d4af4 4125{
b963291c 4126 struct drm_device *dev = dev_priv->dev;
8b2e326d 4127
77913b39
JN
4128 intel_hpd_init_work(dev_priv);
4129
c6a828d3 4130 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
a4da4fa4 4131 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
8b2e326d 4132
a6706b45 4133 /* Let's track the enabled rps events */
b963291c 4134 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
6c65a587 4135 /* WaGsvRC0ResidencyMethod:vlv */
6f4b12f8 4136 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
31685c25
D
4137 else
4138 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
a6706b45 4139
737b1506
CW
4140 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4141 i915_hangcheck_elapsed);
61bac78e 4142
97a19a24 4143 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
9ee32fea 4144
b963291c 4145 if (IS_GEN2(dev_priv)) {
4cdb83ec
VS
4146 dev->max_vblank_count = 0;
4147 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
b963291c 4148 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
f71d4af4
JB
4149 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4150 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
391f75e2
VS
4151 } else {
4152 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4153 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
f71d4af4
JB
4154 }
4155
21da2700
VS
4156 /*
4157 * Opt out of the vblank disable timer on everything except gen2.
4158 * Gen2 doesn't have a hardware frame counter and so depends on
 4159 * vblank interrupts to produce sane vblank sequence numbers.
4160 */
b963291c 4161 if (!IS_GEN2(dev_priv))
21da2700
VS
4162 dev->vblank_disable_immediate = true;
4163
f3a5c3f6
DV
4164 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4165 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
f71d4af4 4166
b963291c 4167 if (IS_CHERRYVIEW(dev_priv)) {
43f328d7
VS
4168 dev->driver->irq_handler = cherryview_irq_handler;
4169 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4170 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4171 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4172 dev->driver->enable_vblank = valleyview_enable_vblank;
4173 dev->driver->disable_vblank = valleyview_disable_vblank;
4174 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
b963291c 4175 } else if (IS_VALLEYVIEW(dev_priv)) {
7e231dbe
JB
4176 dev->driver->irq_handler = valleyview_irq_handler;
4177 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4178 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4179 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4180 dev->driver->enable_vblank = valleyview_enable_vblank;
4181 dev->driver->disable_vblank = valleyview_disable_vblank;
fa00abe0 4182 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
b963291c 4183 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
abd58f01 4184 dev->driver->irq_handler = gen8_irq_handler;
723761b8 4185 dev->driver->irq_preinstall = gen8_irq_reset;
abd58f01
BW
4186 dev->driver->irq_postinstall = gen8_irq_postinstall;
4187 dev->driver->irq_uninstall = gen8_irq_uninstall;
4188 dev->driver->enable_vblank = gen8_enable_vblank;
4189 dev->driver->disable_vblank = gen8_disable_vblank;
e0a20ad7
SS
4190 if (HAS_PCH_SPLIT(dev))
4191 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4192 else
4193 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
f71d4af4
JB
4194 } else if (HAS_PCH_SPLIT(dev)) {
4195 dev->driver->irq_handler = ironlake_irq_handler;
723761b8 4196 dev->driver->irq_preinstall = ironlake_irq_reset;
f71d4af4
JB
4197 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4198 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4199 dev->driver->enable_vblank = ironlake_enable_vblank;
4200 dev->driver->disable_vblank = ironlake_disable_vblank;
82a28bcf 4201 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
f71d4af4 4202 } else {
b963291c 4203 if (INTEL_INFO(dev_priv)->gen == 2) {
c2798b19
CW
4204 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4205 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4206 dev->driver->irq_handler = i8xx_irq_handler;
4207 dev->driver->irq_uninstall = i8xx_irq_uninstall;
b963291c 4208 } else if (INTEL_INFO(dev_priv)->gen == 3) {
a266c7d5
CW
4209 dev->driver->irq_preinstall = i915_irq_preinstall;
4210 dev->driver->irq_postinstall = i915_irq_postinstall;
4211 dev->driver->irq_uninstall = i915_irq_uninstall;
4212 dev->driver->irq_handler = i915_irq_handler;
c2798b19 4213 } else {
a266c7d5
CW
4214 dev->driver->irq_preinstall = i965_irq_preinstall;
4215 dev->driver->irq_postinstall = i965_irq_postinstall;
4216 dev->driver->irq_uninstall = i965_irq_uninstall;
4217 dev->driver->irq_handler = i965_irq_handler;
c2798b19 4218 }
778eb334
VS
4219 if (I915_HAS_HOTPLUG(dev_priv))
4220 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
f71d4af4
JB
4221 dev->driver->enable_vblank = i915_enable_vblank;
4222 dev->driver->disable_vblank = i915_disable_vblank;
4223 }
4224}
20afbda2 4225
fca52a55
DV
4226/**
4227 * intel_irq_install - enables the hardware interrupt
4228 * @dev_priv: i915 device instance
4229 *
4230 * This function enables the hardware interrupt handling, but leaves the hotplug
4231 * handling still disabled. It is called after intel_irq_init().
4232 *
4233 * In the driver load and resume code we need working interrupts in a few places
4234 * but don't want to deal with the hassle of concurrent probe and hotplug
4235 * workers. Hence the split into this two-stage approach.
4236 */
2aeb7d3a
DV
4237int intel_irq_install(struct drm_i915_private *dev_priv)
4238{
4239 /*
4240 * We enable some interrupt sources in our postinstall hooks, so mark
4241 * interrupts as enabled _before_ actually enabling them to avoid
4242 * special cases in our ordering checks.
4243 */
4244 dev_priv->pm.irqs_enabled = true;
4245
4246 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4247}
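/*
 * A sketch of the load-time ordering the comment above implies (assuming
 * intel_hpd_init() as the later hotplug entry point; not the verbatim
 * load path):
 *
 *	intel_irq_init(dev_priv);	vtables and work items
 *	intel_irq_install(dev_priv);	request the hardware interrupt
 *	...probe steps that need working interrupts...
 *	intel_hpd_init(dev_priv);	hotplug handling enabled last
 */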
4248
fca52a55
DV
4249/**
 4250 * intel_irq_uninstall - finalizes all irq handling
4251 * @dev_priv: i915 device instance
4252 *
4253 * This stops interrupt and hotplug handling and unregisters and frees all
4254 * resources acquired in the init functions.
4255 */
2aeb7d3a
DV
4256void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4257{
4258 drm_irq_uninstall(dev_priv->dev);
4259 intel_hpd_cancel_work(dev_priv);
4260 dev_priv->pm.irqs_enabled = false;
4261}
4262
fca52a55
DV
4263/**
4264 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4265 * @dev_priv: i915 device instance
4266 *
4267 * This function is used to disable interrupts at runtime, both in the runtime
4268 * pm and the system suspend/resume code.
4269 */
b963291c 4270void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
c67a470b 4271{
b963291c 4272 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
2aeb7d3a 4273 dev_priv->pm.irqs_enabled = false;
2dd2a883 4274 synchronize_irq(dev_priv->dev->irq);
c67a470b
PZ
4275}
4276
fca52a55
DV
4277/**
4278 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4279 * @dev_priv: i915 device instance
4280 *
4281 * This function is used to enable interrupts at runtime, both in the runtime
4282 * pm and the system suspend/resume code.
4283 */
b963291c 4284void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
c67a470b 4285{
2aeb7d3a 4286 dev_priv->pm.irqs_enabled = true;
b963291c
DV
4287 dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4288 dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
c67a470b 4289}