drm/i915: unify gen6/gen8 pm irq helpers
drivers/gpu/drm/i915/i915_irq.c
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2 */
3 /*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <linux/circ_buf.h>
34 #include <drm/drmP.h>
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39
40 /**
41 * DOC: interrupt handling
42 *
 43 * These functions provide the basic support for enabling and disabling
 44 * interrupt handling. There's a lot more functionality in i915_irq.c
45 * and related files, but that will be described in separate chapters.
46 */
47
48 static const u32 hpd_ibx[] = {
49 [HPD_CRT] = SDE_CRT_HOTPLUG,
50 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
51 [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
52 [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
53 [HPD_PORT_D] = SDE_PORTD_HOTPLUG
54 };
55
56 static const u32 hpd_cpt[] = {
57 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
58 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
59 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
60 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
61 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
62 };
63
64 static const u32 hpd_mask_i915[] = {
65 [HPD_CRT] = CRT_HOTPLUG_INT_EN,
66 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
67 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
68 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
69 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
70 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
71 };
72
73 static const u32 hpd_status_g4x[] = {
74 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
75 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
76 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
77 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
78 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
79 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
80 };
81
82 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
83 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
84 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
85 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
86 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
87 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
88 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
89 };
90
91 /* IIR can theoretically queue up two events. Be paranoid. */
92 #define GEN8_IRQ_RESET_NDX(type, which) do { \
93 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
94 POSTING_READ(GEN8_##type##_IMR(which)); \
95 I915_WRITE(GEN8_##type##_IER(which), 0); \
96 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
97 POSTING_READ(GEN8_##type##_IIR(which)); \
98 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
99 POSTING_READ(GEN8_##type##_IIR(which)); \
100 } while (0)
101
102 #define GEN5_IRQ_RESET(type) do { \
103 I915_WRITE(type##IMR, 0xffffffff); \
104 POSTING_READ(type##IMR); \
105 I915_WRITE(type##IER, 0); \
106 I915_WRITE(type##IIR, 0xffffffff); \
107 POSTING_READ(type##IIR); \
108 I915_WRITE(type##IIR, 0xffffffff); \
109 POSTING_READ(type##IIR); \
110 } while (0)
111
112 /*
113 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
114 */
115 #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
116 u32 val = I915_READ(reg); \
117 if (val) { \
118 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
119 (reg), val); \
120 I915_WRITE((reg), 0xffffffff); \
121 POSTING_READ(reg); \
122 I915_WRITE((reg), 0xffffffff); \
123 POSTING_READ(reg); \
124 } \
125 } while (0)
126
127 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
128 GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
129 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
130 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
131 POSTING_READ(GEN8_##type##_IMR(which)); \
132 } while (0)
133
134 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
135 GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
136 I915_WRITE(type##IER, (ier_val)); \
137 I915_WRITE(type##IMR, (imr_val)); \
138 POSTING_READ(type##IMR); \
139 } while (0)
140
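/*
 * Illustration (not part of the original file): a sketch of what
 * GEN5_IRQ_RESET(GT) would expand to, assuming the usual GT register
 * names used elsewhere in this file (GTIMR/GTIER/GTIIR):
 *
 *	I915_WRITE(GTIMR, 0xffffffff);
 *	POSTING_READ(GTIMR);
 *	I915_WRITE(GTIER, 0);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *
 * IIR is written twice because, as noted above, it can latch a second
 * event while the first one is still set.
 */
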
141 /* For display hotplug interrupt */
142 void
143 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
144 {
145 assert_spin_locked(&dev_priv->irq_lock);
146
147 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
148 return;
149
150 if ((dev_priv->irq_mask & mask) != 0) {
151 dev_priv->irq_mask &= ~mask;
152 I915_WRITE(DEIMR, dev_priv->irq_mask);
153 POSTING_READ(DEIMR);
154 }
155 }
156
157 void
158 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
159 {
160 assert_spin_locked(&dev_priv->irq_lock);
161
162 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
163 return;
164
165 if ((dev_priv->irq_mask & mask) != mask) {
166 dev_priv->irq_mask |= mask;
167 I915_WRITE(DEIMR, dev_priv->irq_mask);
168 POSTING_READ(DEIMR);
169 }
170 }
171
172 /**
173 * ilk_update_gt_irq - update GTIMR
174 * @dev_priv: driver private
175 * @interrupt_mask: mask of interrupt bits to update
176 * @enabled_irq_mask: mask of interrupt bits to enable
177 */
178 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
179 uint32_t interrupt_mask,
180 uint32_t enabled_irq_mask)
181 {
182 assert_spin_locked(&dev_priv->irq_lock);
183
184 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
185 return;
186
187 dev_priv->gt_irq_mask &= ~interrupt_mask;
188 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
189 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
190 POSTING_READ(GTIMR);
191 }
192
193 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
194 {
195 ilk_update_gt_irq(dev_priv, mask, mask);
196 }
197
198 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
199 {
200 ilk_update_gt_irq(dev_priv, mask, 0);
201 }
202
203 static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
204 {
205 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
206 }
207
208 /**
209 * snb_update_pm_irq - update GEN6_PMIMR
210 * @dev_priv: driver private
211 * @interrupt_mask: mask of interrupt bits to update
212 * @enabled_irq_mask: mask of interrupt bits to enable
213 */
214 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
215 uint32_t interrupt_mask,
216 uint32_t enabled_irq_mask)
217 {
218 uint32_t new_val;
219
220 assert_spin_locked(&dev_priv->irq_lock);
221
222 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
223 return;
224
225 new_val = dev_priv->pm_irq_mask;
226 new_val &= ~interrupt_mask;
227 new_val |= (~enabled_irq_mask & interrupt_mask);
228
229 if (new_val != dev_priv->pm_irq_mask) {
230 dev_priv->pm_irq_mask = new_val;
231 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
232 POSTING_READ(gen6_pm_imr(dev_priv));
233 }
234 }
235
236 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
237 {
238 snb_update_pm_irq(dev_priv, mask, mask);
239 }
240
241 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
242 {
243 snb_update_pm_irq(dev_priv, mask, 0);
244 }
245
246 /**
247 * ibx_display_interrupt_update - update SDEIMR
248 * @dev_priv: driver private
249 * @interrupt_mask: mask of interrupt bits to update
250 * @enabled_irq_mask: mask of interrupt bits to enable
251 */
252 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
253 uint32_t interrupt_mask,
254 uint32_t enabled_irq_mask)
255 {
256 uint32_t sdeimr = I915_READ(SDEIMR);
257 sdeimr &= ~interrupt_mask;
258 sdeimr |= (~enabled_irq_mask & interrupt_mask);
259
260 assert_spin_locked(&dev_priv->irq_lock);
261
262 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
263 return;
264
265 I915_WRITE(SDEIMR, sdeimr);
266 POSTING_READ(SDEIMR);
267 }
268
269 static void
270 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
271 u32 enable_mask, u32 status_mask)
272 {
273 u32 reg = PIPESTAT(pipe);
274 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
275
276 assert_spin_locked(&dev_priv->irq_lock);
277 WARN_ON(!intel_irqs_enabled(dev_priv));
278
279 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
280 status_mask & ~PIPESTAT_INT_STATUS_MASK,
281 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
282 pipe_name(pipe), enable_mask, status_mask))
283 return;
284
285 if ((pipestat & enable_mask) == enable_mask)
286 return;
287
288 dev_priv->pipestat_irq_mask[pipe] |= status_mask;
289
290 /* Enable the interrupt, clear any pending status */
291 pipestat |= enable_mask | status_mask;
292 I915_WRITE(reg, pipestat);
293 POSTING_READ(reg);
294 }
295
296 static void
297 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
298 u32 enable_mask, u32 status_mask)
299 {
300 u32 reg = PIPESTAT(pipe);
301 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
302
303 assert_spin_locked(&dev_priv->irq_lock);
304 WARN_ON(!intel_irqs_enabled(dev_priv));
305
306 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
307 status_mask & ~PIPESTAT_INT_STATUS_MASK,
308 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
309 pipe_name(pipe), enable_mask, status_mask))
310 return;
311
312 if ((pipestat & enable_mask) == 0)
313 return;
314
315 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
316
317 pipestat &= ~enable_mask;
318 I915_WRITE(reg, pipestat);
319 POSTING_READ(reg);
320 }
321
322 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
323 {
324 u32 enable_mask = status_mask << 16;
325
326 /*
327 * On pipe A we don't support the PSR interrupt yet,
 328 * on pipe B and C the same bit MBZ (must be zero).
329 */
330 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
331 return 0;
332 /*
333 * On pipe B and C we don't support the PSR interrupt yet, on pipe
334 * A the same bit is for perf counters which we don't use either.
335 */
336 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
337 return 0;
338
339 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
340 SPRITE0_FLIP_DONE_INT_EN_VLV |
341 SPRITE1_FLIP_DONE_INT_EN_VLV);
342 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
343 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
344 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
345 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
346
347 return enable_mask;
348 }
349
350 void
351 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
352 u32 status_mask)
353 {
354 u32 enable_mask;
355
356 if (IS_VALLEYVIEW(dev_priv->dev))
357 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
358 status_mask);
359 else
360 enable_mask = status_mask << 16;
361 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
362 }
363
364 void
365 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
366 u32 status_mask)
367 {
368 u32 enable_mask;
369
370 if (IS_VALLEYVIEW(dev_priv->dev))
371 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
372 status_mask);
373 else
374 enable_mask = status_mask << 16;
375 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
376 }
377
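/*
 * Note (sketch): on non-VLV platforms the enable bits simply live 16
 * bits above the status bits in PIPESTAT, hence enable_mask =
 * status_mask << 16. VLV needs vlv_get_pipestat_enable_mask() above
 * because a few sprite flip done / PSR bits do not follow that pattern.
 */
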
378 /**
379 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
380 */
381 static void i915_enable_asle_pipestat(struct drm_device *dev)
382 {
383 struct drm_i915_private *dev_priv = dev->dev_private;
384
385 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
386 return;
387
388 spin_lock_irq(&dev_priv->irq_lock);
389
390 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
391 if (INTEL_INFO(dev)->gen >= 4)
392 i915_enable_pipestat(dev_priv, PIPE_A,
393 PIPE_LEGACY_BLC_EVENT_STATUS);
394
395 spin_unlock_irq(&dev_priv->irq_lock);
396 }
397
398 /**
399 * i915_pipe_enabled - check if a pipe is enabled
400 * @dev: DRM device
401 * @pipe: pipe to check
402 *
403 * Reading certain registers when the pipe is disabled can hang the chip.
404 * Use this routine to make sure the PLL is running and the pipe is active
405 * before reading such registers if unsure.
406 */
407 static int
408 i915_pipe_enabled(struct drm_device *dev, int pipe)
409 {
410 struct drm_i915_private *dev_priv = dev->dev_private;
411
412 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
413 /* Locking is horribly broken here, but whatever. */
414 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
415 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
416
417 return intel_crtc->active;
418 } else {
419 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
420 }
421 }
422
423 /*
424 * This timing diagram depicts the video signal in and
425 * around the vertical blanking period.
426 *
427 * Assumptions about the fictitious mode used in this example:
428 * vblank_start >= 3
429 * vsync_start = vblank_start + 1
430 * vsync_end = vblank_start + 2
431 * vtotal = vblank_start + 3
432 *
433 * start of vblank:
434 * latch double buffered registers
435 * increment frame counter (ctg+)
436 * generate start of vblank interrupt (gen4+)
437 * |
438 * | frame start:
439 * | generate frame start interrupt (aka. vblank interrupt) (gmch)
440 * | may be shifted forward 1-3 extra lines via PIPECONF
441 * | |
442 * | | start of vsync:
443 * | | generate vsync interrupt
444 * | | |
445 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
446 * . \hs/ . \hs/ \hs/ \hs/ . \hs/
447 * ----va---> <-----------------vb--------------------> <--------va-------------
448 * | | <----vs-----> |
449 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
450 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
451 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
452 * | | |
453 * last visible pixel first visible pixel
454 * | increment frame counter (gen3/4)
455 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
456 *
457 * x = horizontal active
458 * _ = horizontal blanking
459 * hs = horizontal sync
460 * va = vertical active
461 * vb = vertical blanking
462 * vs = vertical sync
463 * vbs = vblank_start (number)
464 *
465 * Summary:
466 * - most events happen at the start of horizontal sync
467 * - frame start happens at the start of horizontal blank, 1-4 lines
468 * (depending on PIPECONF settings) after the start of vblank
469 * - gen3/4 pixel and frame counter are synchronized with the start
470 * of horizontal active on the first line of vertical active
471 */
472
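/*
 * Worked example (illustrative numbers only, not a real mode): with
 * htotal = 100, hsync_start = 90 and vblank_start = 3, the start of
 * vblank expressed as a pixel count is
 *
 *	vbl_start = 3 * 100 - (100 - 90) = 290
 *
 * i.e. htotal - hsync_start pixels before the first line of vblank,
 * matching the "start of vblank occurs at start of hsync" conversion
 * done in i915_get_vblank_counter() below.
 */
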
473 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
474 {
475 /* Gen2 doesn't have a hardware frame counter */
476 return 0;
477 }
478
479 /* Called from drm generic code, passed a 'crtc', which
480 * we use as a pipe index
481 */
482 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
483 {
484 struct drm_i915_private *dev_priv = dev->dev_private;
485 unsigned long high_frame;
486 unsigned long low_frame;
487 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
488
489 if (!i915_pipe_enabled(dev, pipe)) {
490 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
491 "pipe %c\n", pipe_name(pipe));
492 return 0;
493 }
494
495 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
496 struct intel_crtc *intel_crtc =
497 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
498 const struct drm_display_mode *mode =
499 &intel_crtc->config.adjusted_mode;
500
501 htotal = mode->crtc_htotal;
502 hsync_start = mode->crtc_hsync_start;
503 vbl_start = mode->crtc_vblank_start;
504 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
505 vbl_start = DIV_ROUND_UP(vbl_start, 2);
506 } else {
507 enum transcoder cpu_transcoder = (enum transcoder) pipe;
508
509 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
510 hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
511 vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
512 if ((I915_READ(PIPECONF(cpu_transcoder)) &
513 PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
514 vbl_start = DIV_ROUND_UP(vbl_start, 2);
515 }
516
517 /* Convert to pixel count */
518 vbl_start *= htotal;
519
520 /* Start of vblank event occurs at start of hsync */
521 vbl_start -= htotal - hsync_start;
522
523 high_frame = PIPEFRAME(pipe);
524 low_frame = PIPEFRAMEPIXEL(pipe);
525
526 /*
527 * High & low register fields aren't synchronized, so make sure
528 * we get a low value that's stable across two reads of the high
529 * register.
530 */
531 do {
532 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
533 low = I915_READ(low_frame);
534 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
535 } while (high1 != high2);
536
537 high1 >>= PIPE_FRAME_HIGH_SHIFT;
538 pixel = low & PIPE_PIXEL_MASK;
539 low >>= PIPE_FRAME_LOW_SHIFT;
540
541 /*
542 * The frame counter increments at beginning of active.
543 * Cook up a vblank counter by also checking the pixel
544 * counter against vblank start.
545 */
546 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
547 }
548
549 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
550 {
551 struct drm_i915_private *dev_priv = dev->dev_private;
552 int reg = PIPE_FRMCOUNT_GM45(pipe);
553
554 if (!i915_pipe_enabled(dev, pipe)) {
555 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
556 "pipe %c\n", pipe_name(pipe));
557 return 0;
558 }
559
560 return I915_READ(reg);
561 }
562
563 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
564 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
565
566 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
567 {
568 struct drm_device *dev = crtc->base.dev;
569 struct drm_i915_private *dev_priv = dev->dev_private;
570 const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
571 enum pipe pipe = crtc->pipe;
572 int position, vtotal;
573
574 vtotal = mode->crtc_vtotal;
575 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
576 vtotal /= 2;
577
578 if (IS_GEN2(dev))
579 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
580 else
581 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
582
583 /*
584 * See update_scanline_offset() for the details on the
585 * scanline_offset adjustment.
586 */
587 return (position + crtc->scanline_offset) % vtotal;
588 }
589
590 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
591 unsigned int flags, int *vpos, int *hpos,
592 ktime_t *stime, ktime_t *etime)
593 {
594 struct drm_i915_private *dev_priv = dev->dev_private;
595 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
596 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
597 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
598 int position;
599 int vbl_start, vbl_end, hsync_start, htotal, vtotal;
600 bool in_vbl = true;
601 int ret = 0;
602 unsigned long irqflags;
603
604 if (!intel_crtc->active) {
605 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
606 "pipe %c\n", pipe_name(pipe));
607 return 0;
608 }
609
610 htotal = mode->crtc_htotal;
611 hsync_start = mode->crtc_hsync_start;
612 vtotal = mode->crtc_vtotal;
613 vbl_start = mode->crtc_vblank_start;
614 vbl_end = mode->crtc_vblank_end;
615
616 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
617 vbl_start = DIV_ROUND_UP(vbl_start, 2);
618 vbl_end /= 2;
619 vtotal /= 2;
620 }
621
622 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
623
624 /*
625 * Lock uncore.lock, as we will do multiple timing critical raw
626 * register reads, potentially with preemption disabled, so the
627 * following code must not block on uncore.lock.
628 */
629 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
630
631 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
632
633 /* Get optional system timestamp before query. */
634 if (stime)
635 *stime = ktime_get();
636
637 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
638 /* No obvious pixelcount register. Only query vertical
639 * scanout position from Display scan line register.
640 */
641 position = __intel_get_crtc_scanline(intel_crtc);
642 } else {
643 /* Have access to pixelcount since start of frame.
644 * We can split this into vertical and horizontal
645 * scanout position.
646 */
647 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
648
649 /* convert to pixel counts */
650 vbl_start *= htotal;
651 vbl_end *= htotal;
652 vtotal *= htotal;
653
654 /*
655 * In interlaced modes, the pixel counter counts all pixels,
656 * so one field will have htotal more pixels. In order to avoid
657 * the reported position from jumping backwards when the pixel
658 * counter is beyond the length of the shorter field, just
 659 * clamp the position to the length of the shorter field. This
660 * matches how the scanline counter based position works since
661 * the scanline counter doesn't count the two half lines.
662 */
663 if (position >= vtotal)
664 position = vtotal - 1;
665
666 /*
667 * Start of vblank interrupt is triggered at start of hsync,
668 * just prior to the first active line of vblank. However we
669 * consider lines to start at the leading edge of horizontal
670 * active. So, should we get here before we've crossed into
671 * the horizontal active of the first line in vblank, we would
 672 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix that,
673 * always add htotal-hsync_start to the current pixel position.
674 */
675 position = (position + htotal - hsync_start) % vtotal;
676 }
677
678 /* Get optional system timestamp after query. */
679 if (etime)
680 *etime = ktime_get();
681
682 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
683
684 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
685
686 in_vbl = position >= vbl_start && position < vbl_end;
687
688 /*
 689 * While in vblank, position will be negative,
 690 * counting up towards 0 at vbl_end. Outside
 691 * vblank, position will be positive, counting
 692 * up from vbl_end.
693 */
694 if (position >= vbl_start)
695 position -= vbl_end;
696 else
697 position += vtotal - vbl_end;
698
699 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
700 *vpos = position;
701 *hpos = 0;
702 } else {
703 *vpos = position / htotal;
704 *hpos = position - (*vpos * htotal);
705 }
706
707 /* In vblank? */
708 if (in_vbl)
709 ret |= DRM_SCANOUTPOS_IN_VBLANK;
710
711 return ret;
712 }
713
714 int intel_get_crtc_scanline(struct intel_crtc *crtc)
715 {
716 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
717 unsigned long irqflags;
718 int position;
719
720 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
721 position = __intel_get_crtc_scanline(crtc);
722 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
723
724 return position;
725 }
726
727 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
728 int *max_error,
729 struct timeval *vblank_time,
730 unsigned flags)
731 {
732 struct drm_crtc *crtc;
733
734 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
735 DRM_ERROR("Invalid crtc %d\n", pipe);
736 return -EINVAL;
737 }
738
739 /* Get drm_crtc to timestamp: */
740 crtc = intel_get_crtc_for_pipe(dev, pipe);
741 if (crtc == NULL) {
742 DRM_ERROR("Invalid crtc %d\n", pipe);
743 return -EINVAL;
744 }
745
746 if (!crtc->enabled) {
747 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
748 return -EBUSY;
749 }
750
751 /* Helper routine in DRM core does all the work: */
752 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
753 vblank_time, flags,
754 crtc,
755 &to_intel_crtc(crtc)->config.adjusted_mode);
756 }
757
758 static bool intel_hpd_irq_event(struct drm_device *dev,
759 struct drm_connector *connector)
760 {
761 enum drm_connector_status old_status;
762
763 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
764 old_status = connector->status;
765
766 connector->status = connector->funcs->detect(connector, false);
767 if (old_status == connector->status)
768 return false;
769
770 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
771 connector->base.id,
772 connector->name,
773 drm_get_connector_status_name(old_status),
774 drm_get_connector_status_name(connector->status));
775
776 return true;
777 }
778
779 static void i915_digport_work_func(struct work_struct *work)
780 {
781 struct drm_i915_private *dev_priv =
782 container_of(work, struct drm_i915_private, dig_port_work);
783 u32 long_port_mask, short_port_mask;
784 struct intel_digital_port *intel_dig_port;
785 int i, ret;
786 u32 old_bits = 0;
787
788 spin_lock_irq(&dev_priv->irq_lock);
789 long_port_mask = dev_priv->long_hpd_port_mask;
790 dev_priv->long_hpd_port_mask = 0;
791 short_port_mask = dev_priv->short_hpd_port_mask;
792 dev_priv->short_hpd_port_mask = 0;
793 spin_unlock_irq(&dev_priv->irq_lock);
794
795 for (i = 0; i < I915_MAX_PORTS; i++) {
796 bool valid = false;
797 bool long_hpd = false;
798 intel_dig_port = dev_priv->hpd_irq_port[i];
799 if (!intel_dig_port || !intel_dig_port->hpd_pulse)
800 continue;
801
802 if (long_port_mask & (1 << i)) {
803 valid = true;
804 long_hpd = true;
805 } else if (short_port_mask & (1 << i))
806 valid = true;
807
808 if (valid) {
809 ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
810 if (ret == true) {
811 /* if we get true fallback to old school hpd */
812 old_bits |= (1 << intel_dig_port->base.hpd_pin);
813 }
814 }
815 }
816
817 if (old_bits) {
818 spin_lock_irq(&dev_priv->irq_lock);
819 dev_priv->hpd_event_bits |= old_bits;
820 spin_unlock_irq(&dev_priv->irq_lock);
821 schedule_work(&dev_priv->hotplug_work);
822 }
823 }
824
825 /*
826 * Handle hotplug events outside the interrupt handler proper.
827 */
828 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
829
830 static void i915_hotplug_work_func(struct work_struct *work)
831 {
832 struct drm_i915_private *dev_priv =
833 container_of(work, struct drm_i915_private, hotplug_work);
834 struct drm_device *dev = dev_priv->dev;
835 struct drm_mode_config *mode_config = &dev->mode_config;
836 struct intel_connector *intel_connector;
837 struct intel_encoder *intel_encoder;
838 struct drm_connector *connector;
839 bool hpd_disabled = false;
840 bool changed = false;
841 u32 hpd_event_bits;
842
843 mutex_lock(&mode_config->mutex);
844 DRM_DEBUG_KMS("running encoder hotplug functions\n");
845
846 spin_lock_irq(&dev_priv->irq_lock);
847
848 hpd_event_bits = dev_priv->hpd_event_bits;
849 dev_priv->hpd_event_bits = 0;
850 list_for_each_entry(connector, &mode_config->connector_list, head) {
851 intel_connector = to_intel_connector(connector);
852 if (!intel_connector->encoder)
853 continue;
854 intel_encoder = intel_connector->encoder;
855 if (intel_encoder->hpd_pin > HPD_NONE &&
856 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
857 connector->polled == DRM_CONNECTOR_POLL_HPD) {
858 DRM_INFO("HPD interrupt storm detected on connector %s: "
859 "switching from hotplug detection to polling\n",
860 connector->name);
861 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
862 connector->polled = DRM_CONNECTOR_POLL_CONNECT
863 | DRM_CONNECTOR_POLL_DISCONNECT;
864 hpd_disabled = true;
865 }
866 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
867 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
868 connector->name, intel_encoder->hpd_pin);
869 }
870 }
 871 /* If polling was disabled because there were no outputs to poll,
 872 * make sure it is re-enabled now that HPD is being disabled on
 873 * some connectors (they must be polled instead). */
874 if (hpd_disabled) {
875 drm_kms_helper_poll_enable(dev);
876 mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
877 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
878 }
879
880 spin_unlock_irq(&dev_priv->irq_lock);
881
882 list_for_each_entry(connector, &mode_config->connector_list, head) {
883 intel_connector = to_intel_connector(connector);
884 if (!intel_connector->encoder)
885 continue;
886 intel_encoder = intel_connector->encoder;
887 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
888 if (intel_encoder->hot_plug)
889 intel_encoder->hot_plug(intel_encoder);
890 if (intel_hpd_irq_event(dev, connector))
891 changed = true;
892 }
893 }
894 mutex_unlock(&mode_config->mutex);
895
896 if (changed)
897 drm_kms_helper_hotplug_event(dev);
898 }
899
900 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
901 {
902 struct drm_i915_private *dev_priv = dev->dev_private;
903 u32 busy_up, busy_down, max_avg, min_avg;
904 u8 new_delay;
905
906 spin_lock(&mchdev_lock);
907
908 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
909
910 new_delay = dev_priv->ips.cur_delay;
911
912 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
913 busy_up = I915_READ(RCPREVBSYTUPAVG);
914 busy_down = I915_READ(RCPREVBSYTDNAVG);
915 max_avg = I915_READ(RCBMAXAVG);
916 min_avg = I915_READ(RCBMINAVG);
917
918 /* Handle RCS change request from hw */
919 if (busy_up > max_avg) {
920 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
921 new_delay = dev_priv->ips.cur_delay - 1;
922 if (new_delay < dev_priv->ips.max_delay)
923 new_delay = dev_priv->ips.max_delay;
924 } else if (busy_down < min_avg) {
925 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
926 new_delay = dev_priv->ips.cur_delay + 1;
927 if (new_delay > dev_priv->ips.min_delay)
928 new_delay = dev_priv->ips.min_delay;
929 }
930
931 if (ironlake_set_drps(dev, new_delay))
932 dev_priv->ips.cur_delay = new_delay;
933
934 spin_unlock(&mchdev_lock);
935
936 return;
937 }
938
939 static void notify_ring(struct drm_device *dev,
940 struct intel_engine_cs *ring)
941 {
942 if (!intel_ring_initialized(ring))
943 return;
944
945 trace_i915_gem_request_complete(ring);
946
947 wake_up_all(&ring->irq_queue);
948 i915_queue_hangcheck(dev);
949 }
950
951 static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
952 struct intel_rps_ei *rps_ei)
953 {
954 u32 cz_ts, cz_freq_khz;
955 u32 render_count, media_count;
956 u32 elapsed_render, elapsed_media, elapsed_time;
957 u32 residency = 0;
958
959 cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
960 cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
961
962 render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
963 media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
964
965 if (rps_ei->cz_clock == 0) {
966 rps_ei->cz_clock = cz_ts;
967 rps_ei->render_c0 = render_count;
968 rps_ei->media_c0 = media_count;
969
970 return dev_priv->rps.cur_freq;
971 }
972
973 elapsed_time = cz_ts - rps_ei->cz_clock;
974 rps_ei->cz_clock = cz_ts;
975
976 elapsed_render = render_count - rps_ei->render_c0;
977 rps_ei->render_c0 = render_count;
978
979 elapsed_media = media_count - rps_ei->media_c0;
980 rps_ei->media_c0 = media_count;
981
 982 /* Convert all the counters into a common unit of milliseconds */
983 elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
984 elapsed_render /= cz_freq_khz;
985 elapsed_media /= cz_freq_khz;
986
987 /*
988 * Calculate overall C0 residency percentage
989 * only if elapsed time is non zero
990 */
991 if (elapsed_time) {
992 residency =
993 ((max(elapsed_render, elapsed_media) * 100)
994 / elapsed_time);
995 }
996
997 return residency;
998 }
999
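/*
 * Sketch of the residency math above, with made-up numbers: if the
 * render well was busy for 8 ms and the media well for 5 ms over a
 * 10 ms EI window, then
 *
 *	residency = max(8, 5) * 100 / 10 = 80%
 *
 * which is then compared against the up/down thresholds in
 * vlv_calc_delay_from_C0_counters() below.
 */
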
1000 /**
1001 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
1002 * busy-ness calculated from C0 counters of render & media power wells
1003 * @dev_priv: DRM device private
1004 *
1005 */
1006 static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
1007 {
1008 u32 residency_C0_up = 0, residency_C0_down = 0;
1009 int new_delay, adj;
1010
1011 dev_priv->rps.ei_interrupt_count++;
1012
1013 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
1014
1015
1016 if (dev_priv->rps.up_ei.cz_clock == 0) {
1017 vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
1018 vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
1019 return dev_priv->rps.cur_freq;
1020 }
1021
1022
1023 /*
1024 * To down throttle, C0 residency should be less than down threshold
1025 * for continuous EI intervals. So calculate down EI counters
1026 * once in VLV_INT_COUNT_FOR_DOWN_EI
1027 */
1028 if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
1029
1030 dev_priv->rps.ei_interrupt_count = 0;
1031
1032 residency_C0_down = vlv_c0_residency(dev_priv,
1033 &dev_priv->rps.down_ei);
1034 } else {
1035 residency_C0_up = vlv_c0_residency(dev_priv,
1036 &dev_priv->rps.up_ei);
1037 }
1038
1039 new_delay = dev_priv->rps.cur_freq;
1040
1041 adj = dev_priv->rps.last_adj;
1042 /* C0 residency is greater than UP threshold. Increase Frequency */
1043 if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
1044 if (adj > 0)
1045 adj *= 2;
1046 else
1047 adj = 1;
1048
1049 if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
1050 new_delay = dev_priv->rps.cur_freq + adj;
1051
1052 /*
1053 * For better performance, jump directly
1054 * to RPe if we're below it.
1055 */
1056 if (new_delay < dev_priv->rps.efficient_freq)
1057 new_delay = dev_priv->rps.efficient_freq;
1058
1059 } else if (!dev_priv->rps.ei_interrupt_count &&
1060 (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
1061 if (adj < 0)
1062 adj *= 2;
1063 else
1064 adj = -1;
1065 /*
1066 * C0 residency has been below the down threshold for a full
1067 * VLV_INT_COUNT_FOR_DOWN_EI period, so reduce the frequency.
1068 */
1069 if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
1070 new_delay = dev_priv->rps.cur_freq + adj;
1071 }
1072
1073 return new_delay;
1074 }
1075
1076 static void gen6_pm_rps_work(struct work_struct *work)
1077 {
1078 struct drm_i915_private *dev_priv =
1079 container_of(work, struct drm_i915_private, rps.work);
1080 u32 pm_iir;
1081 int new_delay, adj;
1082
1083 spin_lock_irq(&dev_priv->irq_lock);
1084 pm_iir = dev_priv->rps.pm_iir;
1085 dev_priv->rps.pm_iir = 0;
1086 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1087 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1088 spin_unlock_irq(&dev_priv->irq_lock);
1089
1090 /* Make sure we didn't queue anything we're not going to process. */
1091 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1092
1093 if ((pm_iir & dev_priv->pm_rps_events) == 0)
1094 return;
1095
1096 mutex_lock(&dev_priv->rps.hw_lock);
1097
1098 adj = dev_priv->rps.last_adj;
1099 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1100 if (adj > 0)
1101 adj *= 2;
1102 else {
1103 /* CHV needs even encode values */
1104 adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
1105 }
1106 new_delay = dev_priv->rps.cur_freq + adj;
1107
1108 /*
1109 * For better performance, jump directly
1110 * to RPe if we're below it.
1111 */
1112 if (new_delay < dev_priv->rps.efficient_freq)
1113 new_delay = dev_priv->rps.efficient_freq;
1114 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1115 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1116 new_delay = dev_priv->rps.efficient_freq;
1117 else
1118 new_delay = dev_priv->rps.min_freq_softlimit;
1119 adj = 0;
1120 } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1121 new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
1122 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1123 if (adj < 0)
1124 adj *= 2;
1125 else {
1126 /* CHV needs even encode values */
1127 adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
1128 }
1129 new_delay = dev_priv->rps.cur_freq + adj;
1130 } else { /* unknown event */
1131 new_delay = dev_priv->rps.cur_freq;
1132 }
1133
1134 /* sysfs frequency interfaces may have snuck in while servicing the
1135 * interrupt
1136 */
1137 new_delay = clamp_t(int, new_delay,
1138 dev_priv->rps.min_freq_softlimit,
1139 dev_priv->rps.max_freq_softlimit);
1140
1141 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
1142
1143 if (IS_VALLEYVIEW(dev_priv->dev))
1144 valleyview_set_rps(dev_priv->dev, new_delay);
1145 else
1146 gen6_set_rps(dev_priv->dev, new_delay);
1147
1148 mutex_unlock(&dev_priv->rps.hw_lock);
1149 }
1150
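/*
 * Illustration (made-up sequence): on repeated GEN6_PM_RP_UP_THRESHOLD
 * interrupts last_adj grows 1, 2, 4, 8, ... (2, 4, 8, ... on CHV, which
 * needs even encodings), so the frequency ramps up exponentially until
 * clamp_t() above caps new_delay at max_freq_softlimit and last_adj
 * stops growing.
 */
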
1151
1152 /**
1153 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1154 * occurred.
1155 * @work: workqueue struct
1156 *
1157 * Doesn't actually do anything except notify userspace. As a consequence of
1158 * this event, userspace should try to remap the bad rows, since
1159 * statistically the same row is more likely to go bad again.
1160 */
1161 static void ivybridge_parity_work(struct work_struct *work)
1162 {
1163 struct drm_i915_private *dev_priv =
1164 container_of(work, struct drm_i915_private, l3_parity.error_work);
1165 u32 error_status, row, bank, subbank;
1166 char *parity_event[6];
1167 uint32_t misccpctl;
1168 uint8_t slice = 0;
1169
1170 /* We must turn off DOP level clock gating to access the L3 registers.
1171 * In order to prevent a get/put style interface, acquire struct mutex
1172 * any time we access those registers.
1173 */
1174 mutex_lock(&dev_priv->dev->struct_mutex);
1175
1176 /* If we've screwed up tracking, just let the interrupt fire again */
1177 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1178 goto out;
1179
1180 misccpctl = I915_READ(GEN7_MISCCPCTL);
1181 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1182 POSTING_READ(GEN7_MISCCPCTL);
1183
1184 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1185 u32 reg;
1186
1187 slice--;
1188 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1189 break;
1190
1191 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1192
1193 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1194
1195 error_status = I915_READ(reg);
1196 row = GEN7_PARITY_ERROR_ROW(error_status);
1197 bank = GEN7_PARITY_ERROR_BANK(error_status);
1198 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1199
1200 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1201 POSTING_READ(reg);
1202
1203 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1204 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1205 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1206 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1207 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1208 parity_event[5] = NULL;
1209
1210 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1211 KOBJ_CHANGE, parity_event);
1212
1213 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1214 slice, row, bank, subbank);
1215
1216 kfree(parity_event[4]);
1217 kfree(parity_event[3]);
1218 kfree(parity_event[2]);
1219 kfree(parity_event[1]);
1220 }
1221
1222 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1223
1224 out:
1225 WARN_ON(dev_priv->l3_parity.which_slice);
1226 spin_lock_irq(&dev_priv->irq_lock);
1227 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1228 spin_unlock_irq(&dev_priv->irq_lock);
1229
1230 mutex_unlock(&dev_priv->dev->struct_mutex);
1231 }
1232
1233 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1234 {
1235 struct drm_i915_private *dev_priv = dev->dev_private;
1236
1237 if (!HAS_L3_DPF(dev))
1238 return;
1239
1240 spin_lock(&dev_priv->irq_lock);
1241 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1242 spin_unlock(&dev_priv->irq_lock);
1243
1244 iir &= GT_PARITY_ERROR(dev);
1245 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1246 dev_priv->l3_parity.which_slice |= 1 << 1;
1247
1248 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1249 dev_priv->l3_parity.which_slice |= 1 << 0;
1250
1251 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1252 }
1253
1254 static void ilk_gt_irq_handler(struct drm_device *dev,
1255 struct drm_i915_private *dev_priv,
1256 u32 gt_iir)
1257 {
1258 if (gt_iir &
1259 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1260 notify_ring(dev, &dev_priv->ring[RCS]);
1261 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1262 notify_ring(dev, &dev_priv->ring[VCS]);
1263 }
1264
1265 static void snb_gt_irq_handler(struct drm_device *dev,
1266 struct drm_i915_private *dev_priv,
1267 u32 gt_iir)
1268 {
1269
1270 if (gt_iir &
1271 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1272 notify_ring(dev, &dev_priv->ring[RCS]);
1273 if (gt_iir & GT_BSD_USER_INTERRUPT)
1274 notify_ring(dev, &dev_priv->ring[VCS]);
1275 if (gt_iir & GT_BLT_USER_INTERRUPT)
1276 notify_ring(dev, &dev_priv->ring[BCS]);
1277
1278 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1279 GT_BSD_CS_ERROR_INTERRUPT |
1280 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
1281 i915_handle_error(dev, false, "GT error interrupt 0x%08x",
1282 gt_iir);
1283 }
1284
1285 if (gt_iir & GT_PARITY_ERROR(dev))
1286 ivybridge_parity_error_irq_handler(dev, gt_iir);
1287 }
1288
1289 static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1290 {
1291 if ((pm_iir & dev_priv->pm_rps_events) == 0)
1292 return;
1293
1294 spin_lock(&dev_priv->irq_lock);
1295 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1296 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1297 spin_unlock(&dev_priv->irq_lock);
1298
1299 queue_work(dev_priv->wq, &dev_priv->rps.work);
1300 }
1301
1302 static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1303 struct drm_i915_private *dev_priv,
1304 u32 master_ctl)
1305 {
1306 struct intel_engine_cs *ring;
1307 u32 rcs, bcs, vcs;
1308 uint32_t tmp = 0;
1309 irqreturn_t ret = IRQ_NONE;
1310
1311 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1312 tmp = I915_READ(GEN8_GT_IIR(0));
1313 if (tmp) {
1314 I915_WRITE(GEN8_GT_IIR(0), tmp);
1315 ret = IRQ_HANDLED;
1316
1317 rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
1318 ring = &dev_priv->ring[RCS];
1319 if (rcs & GT_RENDER_USER_INTERRUPT)
1320 notify_ring(dev, ring);
1321 if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
1322 intel_execlists_handle_ctx_events(ring);
1323
1324 bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1325 ring = &dev_priv->ring[BCS];
1326 if (bcs & GT_RENDER_USER_INTERRUPT)
1327 notify_ring(dev, ring);
1328 if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
1329 intel_execlists_handle_ctx_events(ring);
1330 } else
1331 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1332 }
1333
1334 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1335 tmp = I915_READ(GEN8_GT_IIR(1));
1336 if (tmp) {
1337 I915_WRITE(GEN8_GT_IIR(1), tmp);
1338 ret = IRQ_HANDLED;
1339
1340 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
1341 ring = &dev_priv->ring[VCS];
1342 if (vcs & GT_RENDER_USER_INTERRUPT)
1343 notify_ring(dev, ring);
1344 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1345 intel_execlists_handle_ctx_events(ring);
1346
1347 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
1348 ring = &dev_priv->ring[VCS2];
1349 if (vcs & GT_RENDER_USER_INTERRUPT)
1350 notify_ring(dev, ring);
1351 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1352 intel_execlists_handle_ctx_events(ring);
1353 } else
1354 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1355 }
1356
1357 if (master_ctl & GEN8_GT_PM_IRQ) {
1358 tmp = I915_READ(GEN8_GT_IIR(2));
1359 if (tmp & dev_priv->pm_rps_events) {
1360 I915_WRITE(GEN8_GT_IIR(2),
1361 tmp & dev_priv->pm_rps_events);
1362 ret = IRQ_HANDLED;
1363 gen8_rps_irq_handler(dev_priv, tmp);
1364 } else
1365 DRM_ERROR("The master control interrupt lied (PM)!\n");
1366 }
1367
1368 if (master_ctl & GEN8_GT_VECS_IRQ) {
1369 tmp = I915_READ(GEN8_GT_IIR(3));
1370 if (tmp) {
1371 I915_WRITE(GEN8_GT_IIR(3), tmp);
1372 ret = IRQ_HANDLED;
1373
1374 vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
1375 ring = &dev_priv->ring[VECS];
1376 if (vcs & GT_RENDER_USER_INTERRUPT)
1377 notify_ring(dev, ring);
1378 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1379 intel_execlists_handle_ctx_events(ring);
1380 } else
1381 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1382 }
1383
1384 return ret;
1385 }
1386
1387 #define HPD_STORM_DETECT_PERIOD 1000
1388 #define HPD_STORM_THRESHOLD 5
1389
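/*
 * Storm detection sketch: a pin is treated as "storming" when it raises
 * more than HPD_STORM_THRESHOLD interrupts within one
 * HPD_STORM_DETECT_PERIOD window, i.e. more than 5 within 1000 ms with
 * the values above. intel_hpd_irq_handler() below then marks the pin
 * HPD_MARK_DISABLED and i915_hotplug_work_func() switches the connector
 * over to polling until the delayed re-enable work runs.
 */
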
1390 static int pch_port_to_hotplug_shift(enum port port)
1391 {
1392 switch (port) {
1393 case PORT_A:
1394 case PORT_E:
1395 default:
1396 return -1;
1397 case PORT_B:
1398 return 0;
1399 case PORT_C:
1400 return 8;
1401 case PORT_D:
1402 return 16;
1403 }
1404 }
1405
1406 static int i915_port_to_hotplug_shift(enum port port)
1407 {
1408 switch (port) {
1409 case PORT_A:
1410 case PORT_E:
1411 default:
1412 return -1;
1413 case PORT_B:
1414 return 17;
1415 case PORT_C:
1416 return 19;
1417 case PORT_D:
1418 return 21;
1419 }
1420 }
1421
1422 static inline enum port get_port_from_pin(enum hpd_pin pin)
1423 {
1424 switch (pin) {
1425 case HPD_PORT_B:
1426 return PORT_B;
1427 case HPD_PORT_C:
1428 return PORT_C;
1429 case HPD_PORT_D:
1430 return PORT_D;
1431 default:
1432 return PORT_A; /* no hpd */
1433 }
1434 }
1435
1436 static inline void intel_hpd_irq_handler(struct drm_device *dev,
1437 u32 hotplug_trigger,
1438 u32 dig_hotplug_reg,
1439 const u32 *hpd)
1440 {
1441 struct drm_i915_private *dev_priv = dev->dev_private;
1442 int i;
1443 enum port port;
1444 bool storm_detected = false;
1445 bool queue_dig = false, queue_hp = false;
1446 u32 dig_shift;
1447 u32 dig_port_mask = 0;
1448
1449 if (!hotplug_trigger)
1450 return;
1451
1452 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
1453 hotplug_trigger, dig_hotplug_reg);
1454
1455 spin_lock(&dev_priv->irq_lock);
1456 for (i = 1; i < HPD_NUM_PINS; i++) {
1457 if (!(hpd[i] & hotplug_trigger))
1458 continue;
1459
1460 port = get_port_from_pin(i);
1461 if (port && dev_priv->hpd_irq_port[port]) {
1462 bool long_hpd;
1463
1464 if (HAS_PCH_SPLIT(dev)) {
1465 dig_shift = pch_port_to_hotplug_shift(port);
1466 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1467 } else {
1468 dig_shift = i915_port_to_hotplug_shift(port);
1469 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1470 }
1471
1472 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
1473 port_name(port),
1474 long_hpd ? "long" : "short");
1475 /* for long HPD pulses we want the digital port work queue to run,
1476 but we still want HPD storm detection to function. */
1477 if (long_hpd) {
1478 dev_priv->long_hpd_port_mask |= (1 << port);
1479 dig_port_mask |= hpd[i];
1480 } else {
1481 /* for short HPD just trigger the digital queue */
1482 dev_priv->short_hpd_port_mask |= (1 << port);
1483 hotplug_trigger &= ~hpd[i];
1484 }
1485 queue_dig = true;
1486 }
1487 }
1488
1489 for (i = 1; i < HPD_NUM_PINS; i++) {
1490 if (hpd[i] & hotplug_trigger &&
1491 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1492 /*
1493 * On GMCH platforms the interrupt mask bits only
1494 * prevent irq generation, not the setting of the
1495 * hotplug bits themselves. So only WARN about unexpected
1496 * interrupts on saner platforms.
1497 */
1498 WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
1499 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1500 hotplug_trigger, i, hpd[i]);
1501
1502 continue;
1503 }
1504
1505 if (!(hpd[i] & hotplug_trigger) ||
1506 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1507 continue;
1508
1509 if (!(dig_port_mask & hpd[i])) {
1510 dev_priv->hpd_event_bits |= (1 << i);
1511 queue_hp = true;
1512 }
1513
1514 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1515 dev_priv->hpd_stats[i].hpd_last_jiffies
1516 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1517 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1518 dev_priv->hpd_stats[i].hpd_cnt = 0;
1519 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
1520 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1521 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
1522 dev_priv->hpd_event_bits &= ~(1 << i);
1523 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
1524 storm_detected = true;
1525 } else {
1526 dev_priv->hpd_stats[i].hpd_cnt++;
1527 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1528 dev_priv->hpd_stats[i].hpd_cnt);
1529 }
1530 }
1531
1532 if (storm_detected)
1533 dev_priv->display.hpd_irq_setup(dev);
1534 spin_unlock(&dev_priv->irq_lock);
1535
1536 /*
1537 * Our hotplug handler can grab modeset locks (by calling down into the
1538 * fb helpers). Hence it must not be run on our own dev_priv->wq work
1539 * queue for otherwise the flush_work in the pageflip code will
1540 * deadlock.
1541 */
1542 if (queue_dig)
1543 queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
1544 if (queue_hp)
1545 schedule_work(&dev_priv->hotplug_work);
1546 }
1547
1548 static void gmbus_irq_handler(struct drm_device *dev)
1549 {
1550 struct drm_i915_private *dev_priv = dev->dev_private;
1551
1552 wake_up_all(&dev_priv->gmbus_wait_queue);
1553 }
1554
1555 static void dp_aux_irq_handler(struct drm_device *dev)
1556 {
1557 struct drm_i915_private *dev_priv = dev->dev_private;
1558
1559 wake_up_all(&dev_priv->gmbus_wait_queue);
1560 }
1561
1562 #if defined(CONFIG_DEBUG_FS)
1563 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1564 uint32_t crc0, uint32_t crc1,
1565 uint32_t crc2, uint32_t crc3,
1566 uint32_t crc4)
1567 {
1568 struct drm_i915_private *dev_priv = dev->dev_private;
1569 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1570 struct intel_pipe_crc_entry *entry;
1571 int head, tail;
1572
1573 spin_lock(&pipe_crc->lock);
1574
1575 if (!pipe_crc->entries) {
1576 spin_unlock(&pipe_crc->lock);
1577 DRM_ERROR("spurious interrupt\n");
1578 return;
1579 }
1580
1581 head = pipe_crc->head;
1582 tail = pipe_crc->tail;
1583
1584 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1585 spin_unlock(&pipe_crc->lock);
1586 DRM_ERROR("CRC buffer overflowing\n");
1587 return;
1588 }
1589
1590 entry = &pipe_crc->entries[head];
1591
1592 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1593 entry->crc[0] = crc0;
1594 entry->crc[1] = crc1;
1595 entry->crc[2] = crc2;
1596 entry->crc[3] = crc3;
1597 entry->crc[4] = crc4;
1598
1599 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1600 pipe_crc->head = head;
1601
1602 spin_unlock(&pipe_crc->lock);
1603
1604 wake_up_interruptible(&pipe_crc->wq);
1605 }
1606 #else
1607 static inline void
1608 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1609 uint32_t crc0, uint32_t crc1,
1610 uint32_t crc2, uint32_t crc3,
1611 uint32_t crc4) {}
1612 #endif
1613
1614
1615 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1616 {
1617 struct drm_i915_private *dev_priv = dev->dev_private;
1618
1619 display_pipe_crc_irq_handler(dev, pipe,
1620 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1621 0, 0, 0, 0);
1622 }
1623
1624 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1625 {
1626 struct drm_i915_private *dev_priv = dev->dev_private;
1627
1628 display_pipe_crc_irq_handler(dev, pipe,
1629 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1630 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1631 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1632 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1633 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1634 }
1635
1636 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1637 {
1638 struct drm_i915_private *dev_priv = dev->dev_private;
1639 uint32_t res1, res2;
1640
1641 if (INTEL_INFO(dev)->gen >= 3)
1642 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1643 else
1644 res1 = 0;
1645
1646 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1647 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1648 else
1649 res2 = 0;
1650
1651 display_pipe_crc_irq_handler(dev, pipe,
1652 I915_READ(PIPE_CRC_RES_RED(pipe)),
1653 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1654 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1655 res1, res2);
1656 }
1657
1658 /* The RPS events need forcewake, so we add them to a work queue and mask their
1659 * IMR bits until the work is done. Other interrupts can be processed without
1660 * the work queue. */
1661 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1662 {
1663 if (pm_iir & dev_priv->pm_rps_events) {
1664 spin_lock(&dev_priv->irq_lock);
1665 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1666 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1667 spin_unlock(&dev_priv->irq_lock);
1668
1669 queue_work(dev_priv->wq, &dev_priv->rps.work);
1670 }
1671
1672 if (HAS_VEBOX(dev_priv->dev)) {
1673 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1674 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
1675
1676 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
1677 i915_handle_error(dev_priv->dev, false,
1678 "VEBOX CS error interrupt 0x%08x",
1679 pm_iir);
1680 }
1681 }
1682 }
1683
1684 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1685 {
1686 if (!drm_handle_vblank(dev, pipe))
1687 return false;
1688
1689 return true;
1690 }
1691
1692 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1693 {
1694 struct drm_i915_private *dev_priv = dev->dev_private;
1695 u32 pipe_stats[I915_MAX_PIPES] = { };
1696 int pipe;
1697
1698 spin_lock(&dev_priv->irq_lock);
1699 for_each_pipe(dev_priv, pipe) {
1700 int reg;
1701 u32 mask, iir_bit = 0;
1702
1703 /*
1704 * PIPESTAT bits get signalled even when the interrupt is
1705 * disabled with the mask bits, and some of the status bits do
1706 * not generate interrupts at all (like the underrun bit). Hence
1707 * we need to be careful that we only handle what we want to
1708 * handle.
1709 */
1710
1711 /* fifo underruns are filtered in the underrun handler. */
1712 mask = PIPE_FIFO_UNDERRUN_STATUS;
1713
1714 switch (pipe) {
1715 case PIPE_A:
1716 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1717 break;
1718 case PIPE_B:
1719 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1720 break;
1721 case PIPE_C:
1722 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1723 break;
1724 }
1725 if (iir & iir_bit)
1726 mask |= dev_priv->pipestat_irq_mask[pipe];
1727
1728 if (!mask)
1729 continue;
1730
1731 reg = PIPESTAT(pipe);
1732 mask |= PIPESTAT_INT_ENABLE_MASK;
1733 pipe_stats[pipe] = I915_READ(reg) & mask;
1734
1735 /*
1736 * Clear the PIPE*STAT regs before the IIR
1737 */
1738 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1739 PIPESTAT_INT_STATUS_MASK))
1740 I915_WRITE(reg, pipe_stats[pipe]);
1741 }
1742 spin_unlock(&dev_priv->irq_lock);
1743
1744 for_each_pipe(dev_priv, pipe) {
1745 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1746 intel_pipe_handle_vblank(dev, pipe))
1747 intel_check_page_flip(dev, pipe);
1748
1749 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1750 intel_prepare_page_flip(dev, pipe);
1751 intel_finish_page_flip(dev, pipe);
1752 }
1753
1754 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1755 i9xx_pipe_crc_irq_handler(dev, pipe);
1756
1757 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1758 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1759 }
1760
1761 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1762 gmbus_irq_handler(dev);
1763 }
1764
1765 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1766 {
1767 struct drm_i915_private *dev_priv = dev->dev_private;
1768 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1769
1770 if (hotplug_status) {
1771 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1772 /*
1773 * Make sure hotplug status is cleared before we clear IIR, or else we
1774 * may miss hotplug events.
1775 */
1776 POSTING_READ(PORT_HOTPLUG_STAT);
1777
1778 if (IS_G4X(dev)) {
1779 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1780
1781 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
1782 } else {
1783 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1784
1785 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
1786 }
1787
1788 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
1789 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1790 dp_aux_irq_handler(dev);
1791 }
1792 }
1793
1794 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1795 {
1796 struct drm_device *dev = arg;
1797 struct drm_i915_private *dev_priv = dev->dev_private;
1798 u32 iir, gt_iir, pm_iir;
1799 irqreturn_t ret = IRQ_NONE;
1800
1801 while (true) {
1802 /* Find, clear, then process each source of interrupt */
1803
1804 gt_iir = I915_READ(GTIIR);
1805 if (gt_iir)
1806 I915_WRITE(GTIIR, gt_iir);
1807
1808 pm_iir = I915_READ(GEN6_PMIIR);
1809 if (pm_iir)
1810 I915_WRITE(GEN6_PMIIR, pm_iir);
1811
1812 iir = I915_READ(VLV_IIR);
1813 if (iir) {
1814 /* Consume port before clearing IIR or we'll miss events */
1815 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1816 i9xx_hpd_irq_handler(dev);
1817 I915_WRITE(VLV_IIR, iir);
1818 }
1819
1820 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1821 goto out;
1822
1823 ret = IRQ_HANDLED;
1824
1825 if (gt_iir)
1826 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1827 if (pm_iir)
1828 gen6_rps_irq_handler(dev_priv, pm_iir);
1829 /* Call regardless, as some status bits might not be
1830 * signalled in iir */
1831 valleyview_pipestat_irq_handler(dev, iir);
1832 }
1833
1834 out:
1835 return ret;
1836 }
1837
1838 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1839 {
1840 struct drm_device *dev = arg;
1841 struct drm_i915_private *dev_priv = dev->dev_private;
1842 u32 master_ctl, iir;
1843 irqreturn_t ret = IRQ_NONE;
1844
1845 for (;;) {
1846 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1847 iir = I915_READ(VLV_IIR);
1848
1849 if (master_ctl == 0 && iir == 0)
1850 break;
1851
1852 ret = IRQ_HANDLED;
1853
1854 I915_WRITE(GEN8_MASTER_IRQ, 0);
1855
1856 /* Find, clear, then process each source of interrupt */
1857
1858 if (iir) {
1859 /* Consume port before clearing IIR or we'll miss events */
1860 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1861 i9xx_hpd_irq_handler(dev);
1862 I915_WRITE(VLV_IIR, iir);
1863 }
1864
1865 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
1866
1867 /* Call regardless, as some status bits might not be
1868 * signalled in iir */
1869 valleyview_pipestat_irq_handler(dev, iir);
1870
1871 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1872 POSTING_READ(GEN8_MASTER_IRQ);
1873 }
1874
1875 return ret;
1876 }
1877
1878 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1879 {
1880 struct drm_i915_private *dev_priv = dev->dev_private;
1881 int pipe;
1882 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1883 u32 dig_hotplug_reg;
1884
1885 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1886 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1887
1888 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
1889
1890 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1891 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1892 SDE_AUDIO_POWER_SHIFT);
1893 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1894 port_name(port));
1895 }
1896
1897 if (pch_iir & SDE_AUX_MASK)
1898 dp_aux_irq_handler(dev);
1899
1900 if (pch_iir & SDE_GMBUS)
1901 gmbus_irq_handler(dev);
1902
1903 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1904 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1905
1906 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1907 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1908
1909 if (pch_iir & SDE_POISON)
1910 DRM_ERROR("PCH poison interrupt\n");
1911
1912 if (pch_iir & SDE_FDI_MASK)
1913 for_each_pipe(dev_priv, pipe)
1914 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1915 pipe_name(pipe),
1916 I915_READ(FDI_RX_IIR(pipe)));
1917
1918 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1919 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1920
1921 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1922 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1923
1924 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1925 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1926
1927 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1928 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1929 }
1930
1931 static void ivb_err_int_handler(struct drm_device *dev)
1932 {
1933 struct drm_i915_private *dev_priv = dev->dev_private;
1934 u32 err_int = I915_READ(GEN7_ERR_INT);
1935 enum pipe pipe;
1936
1937 if (err_int & ERR_INT_POISON)
1938 DRM_ERROR("Poison interrupt\n");
1939
1940 for_each_pipe(dev_priv, pipe) {
1941 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1942 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1943
1944 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1945 if (IS_IVYBRIDGE(dev))
1946 ivb_pipe_crc_irq_handler(dev, pipe);
1947 else
1948 hsw_pipe_crc_irq_handler(dev, pipe);
1949 }
1950 }
1951
1952 I915_WRITE(GEN7_ERR_INT, err_int);
1953 }
1954
1955 static void cpt_serr_int_handler(struct drm_device *dev)
1956 {
1957 struct drm_i915_private *dev_priv = dev->dev_private;
1958 u32 serr_int = I915_READ(SERR_INT);
1959
1960 if (serr_int & SERR_INT_POISON)
1961 DRM_ERROR("PCH poison interrupt\n");
1962
1963 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1964 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1965
1966 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1967 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1968
1969 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1970 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1971
1972 I915_WRITE(SERR_INT, serr_int);
1973 }
1974
1975 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1976 {
1977 struct drm_i915_private *dev_priv = dev->dev_private;
1978 int pipe;
1979 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1980 u32 dig_hotplug_reg;
1981
1982 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1983 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1984
1985 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
1986
1987 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1988 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1989 SDE_AUDIO_POWER_SHIFT_CPT);
1990 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1991 port_name(port));
1992 }
1993
1994 if (pch_iir & SDE_AUX_MASK_CPT)
1995 dp_aux_irq_handler(dev);
1996
1997 if (pch_iir & SDE_GMBUS_CPT)
1998 gmbus_irq_handler(dev);
1999
2000 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2001 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2002
2003 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2004 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2005
2006 if (pch_iir & SDE_FDI_MASK_CPT)
2007 for_each_pipe(dev_priv, pipe)
2008 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2009 pipe_name(pipe),
2010 I915_READ(FDI_RX_IIR(pipe)));
2011
2012 if (pch_iir & SDE_ERROR_CPT)
2013 cpt_serr_int_handler(dev);
2014 }
2015
2016 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2017 {
2018 struct drm_i915_private *dev_priv = dev->dev_private;
2019 enum pipe pipe;
2020
2021 if (de_iir & DE_AUX_CHANNEL_A)
2022 dp_aux_irq_handler(dev);
2023
2024 if (de_iir & DE_GSE)
2025 intel_opregion_asle_intr(dev);
2026
2027 if (de_iir & DE_POISON)
2028 DRM_ERROR("Poison interrupt\n");
2029
2030 for_each_pipe(dev_priv, pipe) {
2031 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2032 intel_pipe_handle_vblank(dev, pipe))
2033 intel_check_page_flip(dev, pipe);
2034
2035 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2036 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2037
2038 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2039 i9xx_pipe_crc_irq_handler(dev, pipe);
2040
2041 /* plane/pipes map 1:1 on ilk+ */
2042 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2043 intel_prepare_page_flip(dev, pipe);
2044 intel_finish_page_flip_plane(dev, pipe);
2045 }
2046 }
2047
2048 /* check event from PCH */
2049 if (de_iir & DE_PCH_EVENT) {
2050 u32 pch_iir = I915_READ(SDEIIR);
2051
2052 if (HAS_PCH_CPT(dev))
2053 cpt_irq_handler(dev, pch_iir);
2054 else
2055 ibx_irq_handler(dev, pch_iir);
2056
2057 /* should clear PCH hotplug event before clearing CPU irq */
2058 I915_WRITE(SDEIIR, pch_iir);
2059 }
2060
2061 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2062 ironlake_rps_change_irq_handler(dev);
2063 }
2064
2065 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2066 {
2067 struct drm_i915_private *dev_priv = dev->dev_private;
2068 enum pipe pipe;
2069
2070 if (de_iir & DE_ERR_INT_IVB)
2071 ivb_err_int_handler(dev);
2072
2073 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2074 dp_aux_irq_handler(dev);
2075
2076 if (de_iir & DE_GSE_IVB)
2077 intel_opregion_asle_intr(dev);
2078
2079 for_each_pipe(dev_priv, pipe) {
2080 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2081 intel_pipe_handle_vblank(dev, pipe))
2082 intel_check_page_flip(dev, pipe);
2083
2084 /* plane/pipes map 1:1 on ilk+ */
2085 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2086 intel_prepare_page_flip(dev, pipe);
2087 intel_finish_page_flip_plane(dev, pipe);
2088 }
2089 }
2090
2091 /* check event from PCH */
2092 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2093 u32 pch_iir = I915_READ(SDEIIR);
2094
2095 cpt_irq_handler(dev, pch_iir);
2096
2097 /* clear PCH hotplug event before clearing CPU irq */
2098 I915_WRITE(SDEIIR, pch_iir);
2099 }
2100 }
2101
2102 /*
2103 * To handle irqs with the minimum potential races with fresh interrupts, we:
2104 * 1 - Disable Master Interrupt Control.
2105 * 2 - Find the source(s) of the interrupt.
2106 * 3 - Clear the Interrupt Identity bits (IIR).
2107 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2108 * 5 - Re-enable Master Interrupt Control.
2109 */
2110 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2111 {
2112 struct drm_device *dev = arg;
2113 struct drm_i915_private *dev_priv = dev->dev_private;
2114 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2115 irqreturn_t ret = IRQ_NONE;
2116
2117 /* We get interrupts on unclaimed registers, so check for this before we
2118 * do any I915_{READ,WRITE}. */
2119 intel_uncore_check_errors(dev);
2120
2121 /* disable master interrupt before clearing iir */
2122 de_ier = I915_READ(DEIER);
2123 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2124 POSTING_READ(DEIER);
2125
2126 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2127 * interrupts will be stored on its back queue, and then we'll be
2128 * able to process them after we restore SDEIER (as soon as we restore
2129 * it, we'll get an interrupt if SDEIIR still has something to process
2130 * due to its back queue). */
2131 if (!HAS_PCH_NOP(dev)) {
2132 sde_ier = I915_READ(SDEIER);
2133 I915_WRITE(SDEIER, 0);
2134 POSTING_READ(SDEIER);
2135 }
2136
2137 /* Find, clear, then process each source of interrupt */
2138
2139 gt_iir = I915_READ(GTIIR);
2140 if (gt_iir) {
2141 I915_WRITE(GTIIR, gt_iir);
2142 ret = IRQ_HANDLED;
2143 if (INTEL_INFO(dev)->gen >= 6)
2144 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2145 else
2146 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2147 }
2148
2149 de_iir = I915_READ(DEIIR);
2150 if (de_iir) {
2151 I915_WRITE(DEIIR, de_iir);
2152 ret = IRQ_HANDLED;
2153 if (INTEL_INFO(dev)->gen >= 7)
2154 ivb_display_irq_handler(dev, de_iir);
2155 else
2156 ilk_display_irq_handler(dev, de_iir);
2157 }
2158
2159 if (INTEL_INFO(dev)->gen >= 6) {
2160 u32 pm_iir = I915_READ(GEN6_PMIIR);
2161 if (pm_iir) {
2162 I915_WRITE(GEN6_PMIIR, pm_iir);
2163 ret = IRQ_HANDLED;
2164 gen6_rps_irq_handler(dev_priv, pm_iir);
2165 }
2166 }
2167
2168 I915_WRITE(DEIER, de_ier);
2169 POSTING_READ(DEIER);
2170 if (!HAS_PCH_NOP(dev)) {
2171 I915_WRITE(SDEIER, sde_ier);
2172 POSTING_READ(SDEIER);
2173 }
2174
2175 return ret;
2176 }
2177
2178 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2179 {
2180 struct drm_device *dev = arg;
2181 struct drm_i915_private *dev_priv = dev->dev_private;
2182 u32 master_ctl;
2183 irqreturn_t ret = IRQ_NONE;
2184 uint32_t tmp = 0;
2185 enum pipe pipe;
2186
2187 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2188 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2189 if (!master_ctl)
2190 return IRQ_NONE;
2191
2192 I915_WRITE(GEN8_MASTER_IRQ, 0);
2193 POSTING_READ(GEN8_MASTER_IRQ);
2194
2195 /* Find, clear, then process each source of interrupt */
2196
2197 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2198
2199 if (master_ctl & GEN8_DE_MISC_IRQ) {
2200 tmp = I915_READ(GEN8_DE_MISC_IIR);
2201 if (tmp) {
2202 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2203 ret = IRQ_HANDLED;
2204 if (tmp & GEN8_DE_MISC_GSE)
2205 intel_opregion_asle_intr(dev);
2206 else
2207 DRM_ERROR("Unexpected DE Misc interrupt\n");
2208 }
2209 else
2210 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2211 }
2212
2213 if (master_ctl & GEN8_DE_PORT_IRQ) {
2214 tmp = I915_READ(GEN8_DE_PORT_IIR);
2215 if (tmp) {
2216 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2217 ret = IRQ_HANDLED;
2218 if (tmp & GEN8_AUX_CHANNEL_A)
2219 dp_aux_irq_handler(dev);
2220 else
2221 DRM_ERROR("Unexpected DE Port interrupt\n");
2222 }
2223 else
2224 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2225 }
2226
2227 for_each_pipe(dev_priv, pipe) {
2228 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2229
2230 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2231 continue;
2232
2233 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2234 if (pipe_iir) {
2235 ret = IRQ_HANDLED;
2236 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2237
2238 if (pipe_iir & GEN8_PIPE_VBLANK &&
2239 intel_pipe_handle_vblank(dev, pipe))
2240 intel_check_page_flip(dev, pipe);
2241
2242 if (IS_GEN9(dev))
2243 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2244 else
2245 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2246
2247 if (flip_done) {
2248 intel_prepare_page_flip(dev, pipe);
2249 intel_finish_page_flip_plane(dev, pipe);
2250 }
2251
2252 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2253 hsw_pipe_crc_irq_handler(dev, pipe);
2254
2255 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2256 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2257 pipe);
2258
2259
2260 if (IS_GEN9(dev))
2261 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2262 else
2263 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2264
2265 if (fault_errors)
2266 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2267 pipe_name(pipe),
2268 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2269 } else
2270 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2271 }
2272
2273 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2274 /*
2275 * FIXME(BDW): Assume for now that the new interrupt handling
2276 * scheme also closed the SDE interrupt handling race we've seen
2277 * on older pch-split platforms. But this needs testing.
2278 */
2279 u32 pch_iir = I915_READ(SDEIIR);
2280 if (pch_iir) {
2281 I915_WRITE(SDEIIR, pch_iir);
2282 ret = IRQ_HANDLED;
2283 cpt_irq_handler(dev, pch_iir);
2284 } else
2285 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2286
2287 }
2288
2289 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2290 POSTING_READ(GEN8_MASTER_IRQ);
2291
2292 return ret;
2293 }
2294
2295 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2296 bool reset_completed)
2297 {
2298 struct intel_engine_cs *ring;
2299 int i;
2300
2301 /*
2302 * Notify all waiters for GPU completion events that reset state has
2303 * been changed, and that they need to restart their wait after
2304 * checking for potential errors (and bail out to drop locks if there is
2305 * a gpu reset pending so that i915_error_work_func can acquire them).
2306 */
2307
2308 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2309 for_each_ring(ring, dev_priv, i)
2310 wake_up_all(&ring->irq_queue);
2311
2312 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2313 wake_up_all(&dev_priv->pending_flip_queue);
2314
2315 /*
2316 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2317 * reset state is cleared.
2318 */
2319 if (reset_completed)
2320 wake_up_all(&dev_priv->gpu_error.reset_queue);
2321 }
2322
2323 /**
2324 * i915_error_work_func - do process context error handling work
2325 * @work: work struct
2326 *
2327 * Fire an error uevent so userspace can see that a hang or error
2328 * was detected.
2329 */
2330 static void i915_error_work_func(struct work_struct *work)
2331 {
2332 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2333 work);
2334 struct drm_i915_private *dev_priv =
2335 container_of(error, struct drm_i915_private, gpu_error);
2336 struct drm_device *dev = dev_priv->dev;
2337 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2338 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2339 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2340 int ret;
2341
2342 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2343
2344 /*
2345 * Note that there's only one work item which does gpu resets, so we
2346 * need not worry about concurrent gpu resets potentially incrementing
2347 * error->reset_counter twice. We only need to take care of another
2348 * racing irq/hangcheck declaring the gpu dead for a second time. A
2349 * quick check for that is good enough: schedule_work ensures the
2350 * correct ordering between hang detection and this work item, and since
2351 * the reset in-progress bit is only ever set by code outside of this
2352 * work, we don't need to worry about any other races.
2353 */
2354 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2355 DRM_DEBUG_DRIVER("resetting chip\n");
2356 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2357 reset_event);
2358
2359 /*
2360 * In most cases it's guaranteed that we get here with an RPM
2361 * reference held, for example because there is a pending GPU
2362 * request that won't finish until the reset is done. This
2363 * isn't the case at least when we get here by doing a
2364 * simulated reset via debugfs, so get an RPM reference.
2365 */
2366 intel_runtime_pm_get(dev_priv);
2367 /*
2368 * All state reset _must_ be completed before we update the
2369 * reset counter, for otherwise waiters might miss the reset
2370 * pending state and not properly drop locks, resulting in
2371 * deadlocks with the reset work.
2372 */
2373 ret = i915_reset(dev);
2374
2375 intel_display_handle_reset(dev);
2376
2377 intel_runtime_pm_put(dev_priv);
2378
2379 if (ret == 0) {
2380 /*
2381 * After all the gem state is reset, increment the reset
2382 * counter and wake up everyone waiting for the reset to
2383 * complete.
2384 *
2385 * Since unlock operations are a one-sided barrier only,
2386 * we need to insert a barrier here to order any seqno
2387 * updates before
2388 * the counter increment.
2389 */
2390 smp_mb__before_atomic();
2391 atomic_inc(&dev_priv->gpu_error.reset_counter);
2392
2393 kobject_uevent_env(&dev->primary->kdev->kobj,
2394 KOBJ_CHANGE, reset_done_event);
2395 } else {
2396 atomic_set_mask(I915_WEDGED, &error->reset_counter);
2397 }
2398
2399 /*
2400 * Note: The wake_up also serves as a memory barrier so that
2401 * waiters see the updated value of the reset counter atomic_t.
2402 */
2403 i915_error_wake_up(dev_priv, true);
2404 }
2405 }
2406
2407 static void i915_report_and_clear_eir(struct drm_device *dev)
2408 {
2409 struct drm_i915_private *dev_priv = dev->dev_private;
2410 uint32_t instdone[I915_NUM_INSTDONE_REG];
2411 u32 eir = I915_READ(EIR);
2412 int pipe, i;
2413
2414 if (!eir)
2415 return;
2416
2417 pr_err("render error detected, EIR: 0x%08x\n", eir);
2418
2419 i915_get_extra_instdone(dev, instdone);
2420
2421 if (IS_G4X(dev)) {
2422 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2423 u32 ipeir = I915_READ(IPEIR_I965);
2424
2425 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2426 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2427 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2428 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2429 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2430 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2431 I915_WRITE(IPEIR_I965, ipeir);
2432 POSTING_READ(IPEIR_I965);
2433 }
2434 if (eir & GM45_ERROR_PAGE_TABLE) {
2435 u32 pgtbl_err = I915_READ(PGTBL_ER);
2436 pr_err("page table error\n");
2437 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2438 I915_WRITE(PGTBL_ER, pgtbl_err);
2439 POSTING_READ(PGTBL_ER);
2440 }
2441 }
2442
2443 if (!IS_GEN2(dev)) {
2444 if (eir & I915_ERROR_PAGE_TABLE) {
2445 u32 pgtbl_err = I915_READ(PGTBL_ER);
2446 pr_err("page table error\n");
2447 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2448 I915_WRITE(PGTBL_ER, pgtbl_err);
2449 POSTING_READ(PGTBL_ER);
2450 }
2451 }
2452
2453 if (eir & I915_ERROR_MEMORY_REFRESH) {
2454 pr_err("memory refresh error:\n");
2455 for_each_pipe(dev_priv, pipe)
2456 pr_err("pipe %c stat: 0x%08x\n",
2457 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2458 /* pipestat has already been acked */
2459 }
2460 if (eir & I915_ERROR_INSTRUCTION) {
2461 pr_err("instruction error\n");
2462 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2463 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2464 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2465 if (INTEL_INFO(dev)->gen < 4) {
2466 u32 ipeir = I915_READ(IPEIR);
2467
2468 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2469 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2470 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2471 I915_WRITE(IPEIR, ipeir);
2472 POSTING_READ(IPEIR);
2473 } else {
2474 u32 ipeir = I915_READ(IPEIR_I965);
2475
2476 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2477 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2478 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2479 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2480 I915_WRITE(IPEIR_I965, ipeir);
2481 POSTING_READ(IPEIR_I965);
2482 }
2483 }
2484
2485 I915_WRITE(EIR, eir);
2486 POSTING_READ(EIR);
2487 eir = I915_READ(EIR);
2488 if (eir) {
2489 /*
2490 * some errors might have become stuck,
2491 * mask them.
2492 */
2493 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2494 I915_WRITE(EMR, I915_READ(EMR) | eir);
2495 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2496 }
2497 }
2498
2499 /**
2500 * i915_handle_error - handle an error interrupt
2501 * @dev: drm device
2502 *
2503 * Do some basic checking of register state at error interrupt time and
2504 * dump it to the syslog. Also call i915_capture_error_state() to make
2505 * sure we get a record and make it available in debugfs. Fire a uevent
2506 * so userspace knows something bad happened (should trigger collection
2507 * of a ring dump etc.).
2508 */
2509 void i915_handle_error(struct drm_device *dev, bool wedged,
2510 const char *fmt, ...)
2511 {
2512 struct drm_i915_private *dev_priv = dev->dev_private;
2513 va_list args;
2514 char error_msg[80];
2515
2516 va_start(args, fmt);
2517 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2518 va_end(args);
2519
2520 i915_capture_error_state(dev, wedged, error_msg);
2521 i915_report_and_clear_eir(dev);
2522
2523 if (wedged) {
2524 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2525 &dev_priv->gpu_error.reset_counter);
2526
2527 /*
2528 * Wakeup waiting processes so that the reset work function
2529 * i915_error_work_func doesn't deadlock trying to grab various
2530 * locks. By bumping the reset counter first, the woken
2531 * processes will see a reset in progress and back off,
2532 * releasing their locks and then wait for the reset completion.
2533 * We must do this for _all_ gpu waiters that might hold locks
2534 * that the reset work needs to acquire.
2535 *
2536 * Note: The wake_up serves as the required memory barrier to
2537 * ensure that the waiters see the updated value of the reset
2538 * counter atomic_t.
2539 */
2540 i915_error_wake_up(dev_priv, false);
2541 }
2542
2543 /*
2544 * Our reset work can grab modeset locks (since it needs to reset the
2545 * state of outstanding pageflips). Hence it must not be run on our own
2546 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
2547 * code will deadlock.
2548 */
2549 schedule_work(&dev_priv->gpu_error.work);
2550 }
2551
2552 /* Called from drm generic code, passed 'crtc' which
2553 * we use as a pipe index
2554 */
2555 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2556 {
2557 struct drm_i915_private *dev_priv = dev->dev_private;
2558 unsigned long irqflags;
2559
2560 if (!i915_pipe_enabled(dev, pipe))
2561 return -EINVAL;
2562
2563 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2564 if (INTEL_INFO(dev)->gen >= 4)
2565 i915_enable_pipestat(dev_priv, pipe,
2566 PIPE_START_VBLANK_INTERRUPT_STATUS);
2567 else
2568 i915_enable_pipestat(dev_priv, pipe,
2569 PIPE_VBLANK_INTERRUPT_STATUS);
2570 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2571
2572 return 0;
2573 }
2574
2575 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2576 {
2577 struct drm_i915_private *dev_priv = dev->dev_private;
2578 unsigned long irqflags;
2579 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2580 DE_PIPE_VBLANK(pipe);
2581
2582 if (!i915_pipe_enabled(dev, pipe))
2583 return -EINVAL;
2584
2585 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2586 ironlake_enable_display_irq(dev_priv, bit);
2587 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2588
2589 return 0;
2590 }
2591
2592 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2593 {
2594 struct drm_i915_private *dev_priv = dev->dev_private;
2595 unsigned long irqflags;
2596
2597 if (!i915_pipe_enabled(dev, pipe))
2598 return -EINVAL;
2599
2600 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2601 i915_enable_pipestat(dev_priv, pipe,
2602 PIPE_START_VBLANK_INTERRUPT_STATUS);
2603 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2604
2605 return 0;
2606 }
2607
2608 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2609 {
2610 struct drm_i915_private *dev_priv = dev->dev_private;
2611 unsigned long irqflags;
2612
2613 if (!i915_pipe_enabled(dev, pipe))
2614 return -EINVAL;
2615
2616 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2617 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2618 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2619 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2620 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2621 return 0;
2622 }
2623
2624 /* Called from drm generic code, passed 'crtc' which
2625 * we use as a pipe index
2626 */
2627 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2628 {
2629 struct drm_i915_private *dev_priv = dev->dev_private;
2630 unsigned long irqflags;
2631
2632 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2633 i915_disable_pipestat(dev_priv, pipe,
2634 PIPE_VBLANK_INTERRUPT_STATUS |
2635 PIPE_START_VBLANK_INTERRUPT_STATUS);
2636 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2637 }
2638
2639 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2640 {
2641 struct drm_i915_private *dev_priv = dev->dev_private;
2642 unsigned long irqflags;
2643 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2644 DE_PIPE_VBLANK(pipe);
2645
2646 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2647 ironlake_disable_display_irq(dev_priv, bit);
2648 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2649 }
2650
2651 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2652 {
2653 struct drm_i915_private *dev_priv = dev->dev_private;
2654 unsigned long irqflags;
2655
2656 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2657 i915_disable_pipestat(dev_priv, pipe,
2658 PIPE_START_VBLANK_INTERRUPT_STATUS);
2659 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2660 }
2661
2662 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2663 {
2664 struct drm_i915_private *dev_priv = dev->dev_private;
2665 unsigned long irqflags;
2666
2667 if (!i915_pipe_enabled(dev, pipe))
2668 return;
2669
2670 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2671 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2672 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2673 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2674 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2675 }
2676
2677 static u32
2678 ring_last_seqno(struct intel_engine_cs *ring)
2679 {
2680 return list_entry(ring->request_list.prev,
2681 struct drm_i915_gem_request, list)->seqno;
2682 }
2683
2684 static bool
2685 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2686 {
2687 return (list_empty(&ring->request_list) ||
2688 i915_seqno_passed(seqno, ring_last_seqno(ring)));
2689 }
2690
2691 static bool
2692 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2693 {
2694 if (INTEL_INFO(dev)->gen >= 8) {
2695 return (ipehr >> 23) == 0x1c;
2696 } else {
2697 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2698 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2699 MI_SEMAPHORE_REGISTER);
2700 }
2701 }
2702
2703 static struct intel_engine_cs *
2704 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2705 {
2706 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2707 struct intel_engine_cs *signaller;
2708 int i;
2709
2710 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2711 for_each_ring(signaller, dev_priv, i) {
2712 if (ring == signaller)
2713 continue;
2714
2715 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2716 return signaller;
2717 }
2718 } else {
2719 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2720
2721 for_each_ring(signaller, dev_priv, i) {
2722 if (ring == signaller)
2723 continue;
2724
2725 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2726 return signaller;
2727 }
2728 }
2729
2730 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2731 ring->id, ipehr, offset);
2732
2733 return NULL;
2734 }
2735
2736 static struct intel_engine_cs *
2737 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2738 {
2739 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2740 u32 cmd, ipehr, head;
2741 u64 offset = 0;
2742 int i, backwards;
2743
2744 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2745 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2746 return NULL;
2747
2748 /*
2749 * HEAD is likely pointing to the dword after the actual command,
2750 * so scan backwards until we find the MBOX. But limit it to just 3
2751 * or 4 dwords depending on the semaphore wait command size.
2752 * Note that we don't care about ACTHD here since that might
2753 * point at a batch, and semaphores are always emitted into the
2754 * ringbuffer itself.
2755 */
2756 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2757 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2758
2759 for (i = backwards; i; --i) {
2760 /*
2761 * Be paranoid and presume the hw has gone off into the wild -
2762 * our ring is smaller than what the hardware (and hence
2763 * HEAD_ADDR) allows. Also handles wrap-around.
2764 */
2765 head &= ring->buffer->size - 1;
2766
2767 /* This here seems to blow up */
2768 cmd = ioread32(ring->buffer->virtual_start + head);
2769 if (cmd == ipehr)
2770 break;
2771
2772 head -= 4;
2773 }
2774
2775 if (!i)
2776 return NULL;
2777
2778 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2779 if (INTEL_INFO(ring->dev)->gen >= 8) {
2780 offset = ioread32(ring->buffer->virtual_start + head + 12);
2781 offset <<= 32;
2782 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2783 }
2784 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2785 }
2786
2787 static int semaphore_passed(struct intel_engine_cs *ring)
2788 {
2789 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2790 struct intel_engine_cs *signaller;
2791 u32 seqno;
2792
2793 ring->hangcheck.deadlock++;
2794
2795 signaller = semaphore_waits_for(ring, &seqno);
2796 if (signaller == NULL)
2797 return -1;
2798
2799 /* Prevent pathological recursion due to driver bugs */
2800 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2801 return -1;
2802
2803 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2804 return 1;
2805
2806 /* cursory check for an unkickable deadlock */
2807 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2808 semaphore_passed(signaller) < 0)
2809 return -1;
2810
2811 return 0;
2812 }
2813
2814 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2815 {
2816 struct intel_engine_cs *ring;
2817 int i;
2818
2819 for_each_ring(ring, dev_priv, i)
2820 ring->hangcheck.deadlock = 0;
2821 }
2822
2823 static enum intel_ring_hangcheck_action
2824 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2825 {
2826 struct drm_device *dev = ring->dev;
2827 struct drm_i915_private *dev_priv = dev->dev_private;
2828 u32 tmp;
2829
2830 if (acthd != ring->hangcheck.acthd) {
2831 if (acthd > ring->hangcheck.max_acthd) {
2832 ring->hangcheck.max_acthd = acthd;
2833 return HANGCHECK_ACTIVE;
2834 }
2835
2836 return HANGCHECK_ACTIVE_LOOP;
2837 }
2838
2839 if (IS_GEN2(dev))
2840 return HANGCHECK_HUNG;
2841
2842 /* Is the chip hanging on a WAIT_FOR_EVENT?
2843 * If so we can simply poke the RB_WAIT bit
2844 * and break the hang. This should work on
2845 * all but the second generation chipsets.
2846 */
2847 tmp = I915_READ_CTL(ring);
2848 if (tmp & RING_WAIT) {
2849 i915_handle_error(dev, false,
2850 "Kicking stuck wait on %s",
2851 ring->name);
2852 I915_WRITE_CTL(ring, tmp);
2853 return HANGCHECK_KICK;
2854 }
2855
2856 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2857 switch (semaphore_passed(ring)) {
2858 default:
2859 return HANGCHECK_HUNG;
2860 case 1:
2861 i915_handle_error(dev, false,
2862 "Kicking stuck semaphore on %s",
2863 ring->name);
2864 I915_WRITE_CTL(ring, tmp);
2865 return HANGCHECK_KICK;
2866 case 0:
2867 return HANGCHECK_WAIT;
2868 }
2869 }
2870
2871 return HANGCHECK_HUNG;
2872 }
2873
2874 /**
2875 * This is called when the chip hasn't reported back with completed
2876 * batchbuffers in a long time. We keep track of per-ring seqno progress and,
2877 * if there is no progress, the hangcheck score for that ring is increased.
2878 * Further, acthd is inspected to see if the ring is stuck. If it is stuck,
2879 * we kick the ring. If we see no progress on three subsequent calls
2880 * we assume the chip is wedged and try to fix it by resetting the chip.
2881 */
2882 static void i915_hangcheck_elapsed(unsigned long data)
2883 {
2884 struct drm_device *dev = (struct drm_device *)data;
2885 struct drm_i915_private *dev_priv = dev->dev_private;
2886 struct intel_engine_cs *ring;
2887 int i;
2888 int busy_count = 0, rings_hung = 0;
2889 bool stuck[I915_NUM_RINGS] = { 0 };
2890 #define BUSY 1
2891 #define KICK 5
2892 #define HUNG 20
2893
2894 if (!i915.enable_hangcheck)
2895 return;
2896
2897 for_each_ring(ring, dev_priv, i) {
2898 u64 acthd;
2899 u32 seqno;
2900 bool busy = true;
2901
2902 semaphore_clear_deadlocks(dev_priv);
2903
2904 seqno = ring->get_seqno(ring, false);
2905 acthd = intel_ring_get_active_head(ring);
2906
2907 if (ring->hangcheck.seqno == seqno) {
2908 if (ring_idle(ring, seqno)) {
2909 ring->hangcheck.action = HANGCHECK_IDLE;
2910
2911 if (waitqueue_active(&ring->irq_queue)) {
2912 /* Issue a wake-up to catch stuck h/w. */
2913 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2914 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2915 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2916 ring->name);
2917 else
2918 DRM_INFO("Fake missed irq on %s\n",
2919 ring->name);
2920 wake_up_all(&ring->irq_queue);
2921 }
2922 /* Safeguard against driver failure */
2923 ring->hangcheck.score += BUSY;
2924 } else
2925 busy = false;
2926 } else {
2927 /* We always increment the hangcheck score
2928 * if the ring is busy and still processing
2929 * the same request, so that no single request
2930 * can run indefinitely (such as a chain of
2931 * batches). The only time we do not increment
2932 * the hangcheck score on this ring is if this
2933 * ring is in a legitimate wait for another
2934 * ring. In that case the waiting ring is a
2935 * victim and we want to be sure we catch the
2936 * right culprit. Then every time we do kick
2937 * the ring, add a small increment to the
2938 * score so that we can catch a batch that is
2939 * being repeatedly kicked and so responsible
2940 * for stalling the machine.
2941 */
2942 ring->hangcheck.action = ring_stuck(ring,
2943 acthd);
2944
2945 switch (ring->hangcheck.action) {
2946 case HANGCHECK_IDLE:
2947 case HANGCHECK_WAIT:
2948 case HANGCHECK_ACTIVE:
2949 break;
2950 case HANGCHECK_ACTIVE_LOOP:
2951 ring->hangcheck.score += BUSY;
2952 break;
2953 case HANGCHECK_KICK:
2954 ring->hangcheck.score += KICK;
2955 break;
2956 case HANGCHECK_HUNG:
2957 ring->hangcheck.score += HUNG;
2958 stuck[i] = true;
2959 break;
2960 }
2961 }
2962 } else {
2963 ring->hangcheck.action = HANGCHECK_ACTIVE;
2964
2965 /* Gradually reduce the count so that we catch DoS
2966 * attempts across multiple batches.
2967 */
2968 if (ring->hangcheck.score > 0)
2969 ring->hangcheck.score--;
2970
2971 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
2972 }
2973
2974 ring->hangcheck.seqno = seqno;
2975 ring->hangcheck.acthd = acthd;
2976 busy_count += busy;
2977 }
2978
2979 for_each_ring(ring, dev_priv, i) {
2980 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
2981 DRM_INFO("%s on %s\n",
2982 stuck[i] ? "stuck" : "no progress",
2983 ring->name);
2984 rings_hung++;
2985 }
2986 }
2987
2988 if (rings_hung)
2989 return i915_handle_error(dev, true, "Ring hung");
2990
2991 if (busy_count)
2992 /* Reset timer in case chip hangs without another request
2993 * being added */
2994 i915_queue_hangcheck(dev);
2995 }
2996
2997 void i915_queue_hangcheck(struct drm_device *dev)
2998 {
2999 struct drm_i915_private *dev_priv = dev->dev_private;
3000 if (!i915.enable_hangcheck)
3001 return;
3002
3003 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
3004 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
3005 }
3006
3007 static void ibx_irq_reset(struct drm_device *dev)
3008 {
3009 struct drm_i915_private *dev_priv = dev->dev_private;
3010
3011 if (HAS_PCH_NOP(dev))
3012 return;
3013
3014 GEN5_IRQ_RESET(SDE);
3015
3016 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3017 I915_WRITE(SERR_INT, 0xffffffff);
3018 }
3019
3020 /*
3021 * SDEIER is also touched by the interrupt handler to work around missed PCH
3022 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3023 * instead we unconditionally enable all PCH interrupt sources here, but then
3024 * only unmask them as needed with SDEIMR.
3025 *
3026 * This function needs to be called before interrupts are enabled.
3027 */
3028 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3029 {
3030 struct drm_i915_private *dev_priv = dev->dev_private;
3031
3032 if (HAS_PCH_NOP(dev))
3033 return;
3034
3035 WARN_ON(I915_READ(SDEIER) != 0);
3036 I915_WRITE(SDEIER, 0xffffffff);
3037 POSTING_READ(SDEIER);
3038 }
3039
3040 static void gen5_gt_irq_reset(struct drm_device *dev)
3041 {
3042 struct drm_i915_private *dev_priv = dev->dev_private;
3043
3044 GEN5_IRQ_RESET(GT);
3045 if (INTEL_INFO(dev)->gen >= 6)
3046 GEN5_IRQ_RESET(GEN6_PM);
3047 }
3048
3049 /* drm_dma.h hooks
3050 */
3051 static void ironlake_irq_reset(struct drm_device *dev)
3052 {
3053 struct drm_i915_private *dev_priv = dev->dev_private;
3054
3055 I915_WRITE(HWSTAM, 0xffffffff);
3056
3057 GEN5_IRQ_RESET(DE);
3058 if (IS_GEN7(dev))
3059 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3060
3061 gen5_gt_irq_reset(dev);
3062
3063 ibx_irq_reset(dev);
3064 }
3065
3066 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3067 {
3068 enum pipe pipe;
3069
3070 I915_WRITE(PORT_HOTPLUG_EN, 0);
3071 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3072
3073 for_each_pipe(dev_priv, pipe)
3074 I915_WRITE(PIPESTAT(pipe), 0xffff);
3075
3076 GEN5_IRQ_RESET(VLV_);
3077 }
3078
3079 static void valleyview_irq_preinstall(struct drm_device *dev)
3080 {
3081 struct drm_i915_private *dev_priv = dev->dev_private;
3082
3083 /* VLV magic */
3084 I915_WRITE(VLV_IMR, 0);
3085 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3086 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3087 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3088
3089 gen5_gt_irq_reset(dev);
3090
3091 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3092
3093 vlv_display_irq_reset(dev_priv);
3094 }
3095
3096 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3097 {
3098 GEN8_IRQ_RESET_NDX(GT, 0);
3099 GEN8_IRQ_RESET_NDX(GT, 1);
3100 GEN8_IRQ_RESET_NDX(GT, 2);
3101 GEN8_IRQ_RESET_NDX(GT, 3);
3102 }
3103
3104 static void gen8_irq_reset(struct drm_device *dev)
3105 {
3106 struct drm_i915_private *dev_priv = dev->dev_private;
3107 int pipe;
3108
3109 I915_WRITE(GEN8_MASTER_IRQ, 0);
3110 POSTING_READ(GEN8_MASTER_IRQ);
3111
3112 gen8_gt_irq_reset(dev_priv);
3113
3114 for_each_pipe(dev_priv, pipe)
3115 if (intel_display_power_is_enabled(dev_priv,
3116 POWER_DOMAIN_PIPE(pipe)))
3117 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3118
3119 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3120 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3121 GEN5_IRQ_RESET(GEN8_PCU_);
3122
3123 ibx_irq_reset(dev);
3124 }
3125
3126 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3127 {
3128 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3129
3130 spin_lock_irq(&dev_priv->irq_lock);
3131 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3132 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3133 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3134 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3135 spin_unlock_irq(&dev_priv->irq_lock);
3136 }
3137
3138 static void cherryview_irq_preinstall(struct drm_device *dev)
3139 {
3140 struct drm_i915_private *dev_priv = dev->dev_private;
3141
3142 I915_WRITE(GEN8_MASTER_IRQ, 0);
3143 POSTING_READ(GEN8_MASTER_IRQ);
3144
3145 gen8_gt_irq_reset(dev_priv);
3146
3147 GEN5_IRQ_RESET(GEN8_PCU_);
3148
3149 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3150
3151 vlv_display_irq_reset(dev_priv);
3152 }
3153
3154 static void ibx_hpd_irq_setup(struct drm_device *dev)
3155 {
3156 struct drm_i915_private *dev_priv = dev->dev_private;
3157 struct intel_encoder *intel_encoder;
3158 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3159
3160 if (HAS_PCH_IBX(dev)) {
3161 hotplug_irqs = SDE_HOTPLUG_MASK;
3162 for_each_intel_encoder(dev, intel_encoder)
3163 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3164 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3165 } else {
3166 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3167 for_each_intel_encoder(dev, intel_encoder)
3168 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3169 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3170 }
3171
3172 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3173
3174 /*
3175 * Enable digital hotplug on the PCH, and configure the DP short pulse
3176 * duration to 2ms (which is the minimum in the Display Port spec)
3177 *
3178 * This register is the same on all known PCH chips.
3179 */
3180 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3181 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3182 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3183 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3184 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3185 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3186 }
3187
3188 static void ibx_irq_postinstall(struct drm_device *dev)
3189 {
3190 struct drm_i915_private *dev_priv = dev->dev_private;
3191 u32 mask;
3192
3193 if (HAS_PCH_NOP(dev))
3194 return;
3195
3196 if (HAS_PCH_IBX(dev))
3197 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3198 else
3199 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3200
3201 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3202 I915_WRITE(SDEIMR, ~mask);
3203 }
3204
3205 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3206 {
3207 struct drm_i915_private *dev_priv = dev->dev_private;
3208 u32 pm_irqs, gt_irqs;
3209
3210 pm_irqs = gt_irqs = 0;
3211
3212 dev_priv->gt_irq_mask = ~0;
3213 if (HAS_L3_DPF(dev)) {
3214 /* L3 parity interrupt is always unmasked. */
3215 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3216 gt_irqs |= GT_PARITY_ERROR(dev);
3217 }
3218
3219 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3220 if (IS_GEN5(dev)) {
3221 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3222 ILK_BSD_USER_INTERRUPT;
3223 } else {
3224 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3225 }
3226
3227 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3228
3229 if (INTEL_INFO(dev)->gen >= 6) {
3230 pm_irqs |= dev_priv->pm_rps_events;
3231
3232 if (HAS_VEBOX(dev))
3233 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3234
3235 dev_priv->pm_irq_mask = 0xffffffff;
3236 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3237 }
3238 }
3239
3240 static int ironlake_irq_postinstall(struct drm_device *dev)
3241 {
3242 struct drm_i915_private *dev_priv = dev->dev_private;
3243 u32 display_mask, extra_mask;
3244
3245 if (INTEL_INFO(dev)->gen >= 7) {
3246 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3247 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3248 DE_PLANEB_FLIP_DONE_IVB |
3249 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3250 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3251 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3252 } else {
3253 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3254 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3255 DE_AUX_CHANNEL_A |
3256 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3257 DE_POISON);
3258 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3259 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3260 }
3261
3262 dev_priv->irq_mask = ~display_mask;
3263
3264 I915_WRITE(HWSTAM, 0xeffe);
3265
3266 ibx_irq_pre_postinstall(dev);
3267
3268 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3269
3270 gen5_gt_irq_postinstall(dev);
3271
3272 ibx_irq_postinstall(dev);
3273
3274 if (IS_IRONLAKE_M(dev)) {
3275 /* Enable PCU event interrupts
3276 *
3277 * spinlocking not required here for correctness since interrupt
3278 * setup is guaranteed to run in single-threaded context. But we
3279 * need it to make the assert_spin_locked happy. */
3280 spin_lock_irq(&dev_priv->irq_lock);
3281 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3282 spin_unlock_irq(&dev_priv->irq_lock);
3283 }
3284
3285 return 0;
3286 }
3287
3288 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3289 {
3290 u32 pipestat_mask;
3291 u32 iir_mask;
3292 enum pipe pipe;
3293
3294 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3295 PIPE_FIFO_UNDERRUN_STATUS;
3296
3297 for_each_pipe(dev_priv, pipe)
3298 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3299 POSTING_READ(PIPESTAT(PIPE_A));
3300
3301 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3302 PIPE_CRC_DONE_INTERRUPT_STATUS;
3303
3304 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3305 for_each_pipe(dev_priv, pipe)
3306 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3307
3308 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3309 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3310 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3311 if (IS_CHERRYVIEW(dev_priv))
3312 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3313 dev_priv->irq_mask &= ~iir_mask;
3314
3315 I915_WRITE(VLV_IIR, iir_mask);
3316 I915_WRITE(VLV_IIR, iir_mask);
3317 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3318 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3319 POSTING_READ(VLV_IMR);
3320 }
3321
3322 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3323 {
3324 u32 pipestat_mask;
3325 u32 iir_mask;
3326 enum pipe pipe;
3327
3328 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3329 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3330 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3331 if (IS_CHERRYVIEW(dev_priv))
3332 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3333
3334 dev_priv->irq_mask |= iir_mask;
3335 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3336 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3337 I915_WRITE(VLV_IIR, iir_mask);
3338 I915_WRITE(VLV_IIR, iir_mask);
3339 POSTING_READ(VLV_IIR);
3340
3341 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3342 PIPE_CRC_DONE_INTERRUPT_STATUS;
3343
3344 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3345 for_each_pipe(dev_priv, pipe)
3346 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3347
3348 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3349 PIPE_FIFO_UNDERRUN_STATUS;
3350
3351 for_each_pipe(dev_priv, pipe)
3352 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3353 POSTING_READ(PIPESTAT(PIPE_A));
3354 }
3355
3356 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3357 {
3358 assert_spin_locked(&dev_priv->irq_lock);
3359
3360 if (dev_priv->display_irqs_enabled)
3361 return;
3362
3363 dev_priv->display_irqs_enabled = true;
3364
3365 if (intel_irqs_enabled(dev_priv))
3366 valleyview_display_irqs_install(dev_priv);
3367 }
3368
3369 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3370 {
3371 assert_spin_locked(&dev_priv->irq_lock);
3372
3373 if (!dev_priv->display_irqs_enabled)
3374 return;
3375
3376 dev_priv->display_irqs_enabled = false;
3377
3378 if (intel_irqs_enabled(dev_priv))
3379 valleyview_display_irqs_uninstall(dev_priv);
3380 }
3381
3382 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3383 {
3384 dev_priv->irq_mask = ~0;
3385
3386 I915_WRITE(PORT_HOTPLUG_EN, 0);
3387 POSTING_READ(PORT_HOTPLUG_EN);
3388
3389 I915_WRITE(VLV_IIR, 0xffffffff);
3390 I915_WRITE(VLV_IIR, 0xffffffff);
3391 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3392 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3393 POSTING_READ(VLV_IMR);
3394
3395 /* Interrupt setup is already guaranteed to be single-threaded, this is
3396 * just to make the assert_spin_locked check happy. */
3397 spin_lock_irq(&dev_priv->irq_lock);
3398 if (dev_priv->display_irqs_enabled)
3399 valleyview_display_irqs_install(dev_priv);
3400 spin_unlock_irq(&dev_priv->irq_lock);
3401 }
3402
3403 static int valleyview_irq_postinstall(struct drm_device *dev)
3404 {
3405 struct drm_i915_private *dev_priv = dev->dev_private;
3406
3407 vlv_display_irq_postinstall(dev_priv);
3408
3409 gen5_gt_irq_postinstall(dev);
3410
3411 /* ack & enable invalid PTE error interrupts */
3412 #if 0 /* FIXME: add support to irq handler for checking these bits */
3413 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3414 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3415 #endif
3416
3417 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3418
3419 return 0;
3420 }
3421
3422 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3423 {
3424 /* These are interrupts we'll toggle with the ring mask register */
3425 uint32_t gt_interrupts[] = {
3426 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3427 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3428 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3429 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3430 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3431 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3432 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3433 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3434 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3435 0,
3436 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3437 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3438 };
3439
3440 dev_priv->pm_irq_mask = 0xffffffff;
3441 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3442 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3443 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
3444 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3445 }
3446
3447 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3448 {
3449 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3450 uint32_t de_pipe_enables;
3451 int pipe;
3452
3453 if (IS_GEN9(dev_priv))
3454 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3455 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3456 else
3457 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3458 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3459
3460 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3461 GEN8_PIPE_FIFO_UNDERRUN;
3462
3463 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3464 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3465 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3466
3467 for_each_pipe(dev_priv, pipe)
3468 if (intel_display_power_is_enabled(dev_priv,
3469 POWER_DOMAIN_PIPE(pipe)))
3470 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3471 dev_priv->de_irq_mask[pipe],
3472 de_pipe_enables);
3473
3474 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3475 }
3476
3477 static int gen8_irq_postinstall(struct drm_device *dev)
3478 {
3479 struct drm_i915_private *dev_priv = dev->dev_private;
3480
3481 ibx_irq_pre_postinstall(dev);
3482
3483 gen8_gt_irq_postinstall(dev_priv);
3484 gen8_de_irq_postinstall(dev_priv);
3485
3486 ibx_irq_postinstall(dev);
3487
3488 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3489 POSTING_READ(GEN8_MASTER_IRQ);
3490
3491 return 0;
3492 }
3493
3494 static int cherryview_irq_postinstall(struct drm_device *dev)
3495 {
3496 struct drm_i915_private *dev_priv = dev->dev_private;
3497 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3498 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3499 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3500 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3501 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3502 PIPE_CRC_DONE_INTERRUPT_STATUS;
3503 int pipe;
3504
3505 /*
3506 * Leave vblank interrupts masked initially. enable/disable will
3507 * toggle them based on usage.
3508 */
3509 dev_priv->irq_mask = ~enable_mask;
3510
3511 for_each_pipe(dev_priv, pipe)
3512 I915_WRITE(PIPESTAT(pipe), 0xffff);
3513
3514 spin_lock_irq(&dev_priv->irq_lock);
3515 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3516 for_each_pipe(dev_priv, pipe)
3517 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3518 spin_unlock_irq(&dev_priv->irq_lock);
3519
3520 I915_WRITE(VLV_IIR, 0xffffffff);
3521 I915_WRITE(VLV_IIR, 0xffffffff);
3522 I915_WRITE(VLV_IER, enable_mask);
3523 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3524 POSTING_READ(VLV_IMR);
3525
3526 gen8_gt_irq_postinstall(dev_priv);
3527
3528 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3529 POSTING_READ(GEN8_MASTER_IRQ);
3530
3531 return 0;
3532 }
3533
3534 static void gen8_irq_uninstall(struct drm_device *dev)
3535 {
3536 struct drm_i915_private *dev_priv = dev->dev_private;
3537
3538 if (!dev_priv)
3539 return;
3540
3541 gen8_irq_reset(dev);
3542 }
3543
3544 static void valleyview_irq_uninstall(struct drm_device *dev)
3545 {
3546 struct drm_i915_private *dev_priv = dev->dev_private;
3547
3548 if (!dev_priv)
3549 return;
3550
3551 I915_WRITE(VLV_MASTER_IER, 0);
3552
3553 gen5_gt_irq_reset(dev);
3554
3555 I915_WRITE(HWSTAM, 0xffffffff);
3556
3557 /* Interrupt setup is already guaranteed to be single-threaded, this is
3558 * just to make the assert_spin_locked check happy. */
3559 spin_lock_irq(&dev_priv->irq_lock);
3560 if (dev_priv->display_irqs_enabled)
3561 valleyview_display_irqs_uninstall(dev_priv);
3562 spin_unlock_irq(&dev_priv->irq_lock);
3563
3564 vlv_display_irq_reset(dev_priv);
3565
3566 dev_priv->irq_mask = 0;
3567 }
3568
3569 static void cherryview_irq_uninstall(struct drm_device *dev)
3570 {
3571 struct drm_i915_private *dev_priv = dev->dev_private;
3572 int pipe;
3573
3574 if (!dev_priv)
3575 return;
3576
3577 I915_WRITE(GEN8_MASTER_IRQ, 0);
3578 POSTING_READ(GEN8_MASTER_IRQ);
3579
3580 gen8_gt_irq_reset(dev_priv);
3581
3582 GEN5_IRQ_RESET(GEN8_PCU_);
3583
3584 I915_WRITE(PORT_HOTPLUG_EN, 0);
3585 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3586
3587 for_each_pipe(dev_priv, pipe)
3588 I915_WRITE(PIPESTAT(pipe), 0xffff);
3589
3590 GEN5_IRQ_RESET(VLV_);
3591 }
3592
3593 static void ironlake_irq_uninstall(struct drm_device *dev)
3594 {
3595 struct drm_i915_private *dev_priv = dev->dev_private;
3596
3597 if (!dev_priv)
3598 return;
3599
3600 ironlake_irq_reset(dev);
3601 }
3602
3603 static void i8xx_irq_preinstall(struct drm_device * dev)
3604 {
3605 struct drm_i915_private *dev_priv = dev->dev_private;
3606 int pipe;
3607
3608 for_each_pipe(dev_priv, pipe)
3609 I915_WRITE(PIPESTAT(pipe), 0);
3610 I915_WRITE16(IMR, 0xffff);
3611 I915_WRITE16(IER, 0x0);
3612 POSTING_READ16(IER);
3613 }
3614
3615 static int i8xx_irq_postinstall(struct drm_device *dev)
3616 {
3617 struct drm_i915_private *dev_priv = dev->dev_private;
3618
3619 I915_WRITE16(EMR,
3620 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3621
3622 /* Unmask the interrupts that we always want on. */
3623 dev_priv->irq_mask =
3624 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3625 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3626 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3627 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3628 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3629 I915_WRITE16(IMR, dev_priv->irq_mask);
3630
3631 I915_WRITE16(IER,
3632 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3633 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3634 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3635 I915_USER_INTERRUPT);
3636 POSTING_READ16(IER);
3637
3638 /* Interrupt setup is already guaranteed to be single-threaded, this is
3639 * just to make the assert_spin_locked check happy. */
3640 spin_lock_irq(&dev_priv->irq_lock);
3641 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3642 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3643 spin_unlock_irq(&dev_priv->irq_lock);
3644
3645 return 0;
3646 }
3647
3648 /*
3649 * Returns true when a page flip has completed.
3650 */
3651 static bool i8xx_handle_vblank(struct drm_device *dev,
3652 int plane, int pipe, u32 iir)
3653 {
3654 struct drm_i915_private *dev_priv = dev->dev_private;
3655 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3656
3657 if (!intel_pipe_handle_vblank(dev, pipe))
3658 return false;
3659
3660 if ((iir & flip_pending) == 0)
3661 goto check_page_flip;
3662
3663 intel_prepare_page_flip(dev, plane);
3664
3665 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3666 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3667 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3668 * the flip is completed (no longer pending). Since this doesn't raise
3669 * an interrupt per se, we watch for the change at vblank.
3670 */
3671 if (I915_READ16(ISR) & flip_pending)
3672 goto check_page_flip;
3673
3674 intel_finish_page_flip(dev, pipe);
3675 return true;
3676
3677 check_page_flip:
3678 intel_check_page_flip(dev, pipe);
3679 return false;
3680 }
3681
3682 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3683 {
3684 struct drm_device *dev = arg;
3685 struct drm_i915_private *dev_priv = dev->dev_private;
3686 u16 iir, new_iir;
3687 u32 pipe_stats[2];
3688 int pipe;
3689 u16 flip_mask =
3690 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3691 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3692
3693 iir = I915_READ16(IIR);
3694 if (iir == 0)
3695 return IRQ_NONE;
3696
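/*
 * Note: the flip pending bits are kept out of the loop condition and out of
 * the IIR writes below; they stay set in IIR until i8xx_handle_vblank()
 * sees the flip complete and drops them from flip_mask, at which point a
 * subsequent IIR write can clear them.
 */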
3697 while (iir & ~flip_mask) {
3698 /* Can't rely on pipestat interrupt bit in iir as it might
3699 * have been cleared after the pipestat interrupt was received.
3700 * It doesn't set the bit in iir again, but it still produces
3701 * interrupts (for non-MSI).
3702 */
3703 spin_lock(&dev_priv->irq_lock);
3704 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3705 i915_handle_error(dev, false,
3706 "Command parser error, iir 0x%08x",
3707 iir);
3708
3709 for_each_pipe(dev_priv, pipe) {
3710 int reg = PIPESTAT(pipe);
3711 pipe_stats[pipe] = I915_READ(reg);
3712
3713 /*
3714 * Clear the PIPE*STAT regs before the IIR
3715 */
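/* 0x8000ffff covers the status bits: the low 16 bits plus the FIFO underrun status bit (bit 31); the rest of the high half holds the enable bits. */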
3716 if (pipe_stats[pipe] & 0x8000ffff)
3717 I915_WRITE(reg, pipe_stats[pipe]);
3718 }
3719 spin_unlock(&dev_priv->irq_lock);
3720
3721 I915_WRITE16(IIR, iir & ~flip_mask);
3722 new_iir = I915_READ16(IIR); /* Flush posted writes */
3723
3724 i915_update_dri1_breadcrumb(dev);
3725
3726 if (iir & I915_USER_INTERRUPT)
3727 notify_ring(dev, &dev_priv->ring[RCS]);
3728
3729 for_each_pipe(dev_priv, pipe) {
3730 int plane = pipe;
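/*
 * On gen2/3 only plane A can do FBC, so the crtc setup code swaps the
 * plane/pipe assignment when FBC is possible; mirror that swap here
 * when translating pipe to plane.
 */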
3731 if (HAS_FBC(dev))
3732 plane = !plane;
3733
3734 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3735 i8xx_handle_vblank(dev, plane, pipe, iir))
3736 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3737
3738 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3739 i9xx_pipe_crc_irq_handler(dev, pipe);
3740
3741 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3742 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3743 pipe);
3744 }
3745
3746 iir = new_iir;
3747 }
3748
3749 return IRQ_HANDLED;
3750 }
3751
3752 static void i8xx_irq_uninstall(struct drm_device * dev)
3753 {
3754 struct drm_i915_private *dev_priv = dev->dev_private;
3755 int pipe;
3756
3757 for_each_pipe(dev_priv, pipe) {
3758 /* Clear enable bits; then clear status bits */
3759 I915_WRITE(PIPESTAT(pipe), 0);
3760 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3761 }
3762 I915_WRITE16(IMR, 0xffff);
3763 I915_WRITE16(IER, 0x0);
3764 I915_WRITE16(IIR, I915_READ16(IIR));
3765 }
3766
3767 static void i915_irq_preinstall(struct drm_device * dev)
3768 {
3769 struct drm_i915_private *dev_priv = dev->dev_private;
3770 int pipe;
3771
3772 if (I915_HAS_HOTPLUG(dev)) {
3773 I915_WRITE(PORT_HOTPLUG_EN, 0);
3774 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3775 }
3776
3777 I915_WRITE16(HWSTAM, 0xeffe);
3778 for_each_pipe(dev_priv, pipe)
3779 I915_WRITE(PIPESTAT(pipe), 0);
3780 I915_WRITE(IMR, 0xffffffff);
3781 I915_WRITE(IER, 0x0);
3782 POSTING_READ(IER);
3783 }
3784
3785 static int i915_irq_postinstall(struct drm_device *dev)
3786 {
3787 struct drm_i915_private *dev_priv = dev->dev_private;
3788 u32 enable_mask;
3789
3790 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3791
3792 /* Unmask the interrupts that we always want on. */
3793 dev_priv->irq_mask =
3794 ~(I915_ASLE_INTERRUPT |
3795 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3796 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3797 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3798 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3799 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3800
3801 enable_mask =
3802 I915_ASLE_INTERRUPT |
3803 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3804 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3805 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3806 I915_USER_INTERRUPT;
3807
3808 if (I915_HAS_HOTPLUG(dev)) {
3809 I915_WRITE(PORT_HOTPLUG_EN, 0);
3810 POSTING_READ(PORT_HOTPLUG_EN);
3811
3812 /* Enable in IER... */
3813 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3814 /* and unmask in IMR */
3815 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3816 }
3817
3818 I915_WRITE(IMR, dev_priv->irq_mask);
3819 I915_WRITE(IER, enable_mask);
3820 POSTING_READ(IER);
3821
3822 i915_enable_asle_pipestat(dev);
3823
3824 /* Interrupt setup is already guaranteed to be single-threaded; this is
3825 * just to make the assert_spin_locked check happy. */
3826 spin_lock_irq(&dev_priv->irq_lock);
3827 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3828 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3829 spin_unlock_irq(&dev_priv->irq_lock);
3830
3831 return 0;
3832 }
3833
3834 /*
3835 * Returns true when a page flip has completed.
3836 */
3837 static bool i915_handle_vblank(struct drm_device *dev,
3838 int plane, int pipe, u32 iir)
3839 {
3840 struct drm_i915_private *dev_priv = dev->dev_private;
3841 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3842
3843 if (!intel_pipe_handle_vblank(dev, pipe))
3844 return false;
3845
3846 if ((iir & flip_pending) == 0)
3847 goto check_page_flip;
3848
3849 intel_prepare_page_flip(dev, plane);
3850
3851 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3852 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3853 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3854 * the flip is completed (no longer pending). Since this doesn't raise
3855 * an interrupt per se, we watch for the change at vblank.
3856 */
3857 if (I915_READ(ISR) & flip_pending)
3858 goto check_page_flip;
3859
3860 intel_finish_page_flip(dev, pipe);
3861 return true;
3862
3863 check_page_flip:
3864 intel_check_page_flip(dev, pipe);
3865 return false;
3866 }
3867
3868 static irqreturn_t i915_irq_handler(int irq, void *arg)
3869 {
3870 struct drm_device *dev = arg;
3871 struct drm_i915_private *dev_priv = dev->dev_private;
3872 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3873 u32 flip_mask =
3874 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3875 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3876 int pipe, ret = IRQ_NONE;
3877
3878 iir = I915_READ(IIR);
3879 do {
3880 bool irq_received = (iir & ~flip_mask) != 0;
3881 bool blc_event = false;
3882
3883 /* Can't rely on pipestat interrupt bit in iir as it might
3884 * have been cleared after the pipestat interrupt was received.
3885 * It doesn't set the bit in iir again, but it still produces
3886 * interrupts (for non-MSI).
3887 */
3888 spin_lock(&dev_priv->irq_lock);
3889 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3890 i915_handle_error(dev, false,
3891 "Command parser error, iir 0x%08x",
3892 iir);
3893
3894 for_each_pipe(dev_priv, pipe) {
3895 int reg = PIPESTAT(pipe);
3896 pipe_stats[pipe] = I915_READ(reg);
3897
3898 /* Clear the PIPE*STAT regs before the IIR */
3899 if (pipe_stats[pipe] & 0x8000ffff) {
3900 I915_WRITE(reg, pipe_stats[pipe]);
3901 irq_received = true;
3902 }
3903 }
3904 spin_unlock(&dev_priv->irq_lock);
3905
3906 if (!irq_received)
3907 break;
3908
3909 /* Consume port. Then clear IIR or we'll miss events */
3910 if (I915_HAS_HOTPLUG(dev) &&
3911 iir & I915_DISPLAY_PORT_INTERRUPT)
3912 i9xx_hpd_irq_handler(dev);
3913
3914 I915_WRITE(IIR, iir & ~flip_mask);
3915 new_iir = I915_READ(IIR); /* Flush posted writes */
3916
3917 if (iir & I915_USER_INTERRUPT)
3918 notify_ring(dev, &dev_priv->ring[RCS]);
3919
3920 for_each_pipe(dev_priv, pipe) {
3921 int plane = pipe;
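/* same gen2/3 FBC-related plane/pipe swap as in i8xx_irq_handler() */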
3922 if (HAS_FBC(dev))
3923 plane = !plane;
3924
3925 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3926 i915_handle_vblank(dev, plane, pipe, iir))
3927 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3928
3929 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3930 blc_event = true;
3931
3932 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3933 i9xx_pipe_crc_irq_handler(dev, pipe);
3934
3935 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3936 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3937 pipe);
3938 }
3939
3940 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3941 intel_opregion_asle_intr(dev);
3942
3943 /* With MSI, interrupts are only generated when iir
3944 * transitions from zero to nonzero. If another bit got
3945 * set while we were handling the existing iir bits, then
3946 * we would never get another interrupt.
3947 *
3948 * This is fine on non-MSI as well, as if we hit this path
3949 * we avoid exiting the interrupt handler only to generate
3950 * another one.
3951 *
3952 * Note that for MSI this could cause a stray interrupt report
3953 * if an interrupt landed in the time between writing IIR and
3954 * the posting read. This should be rare enough to never
3955 * trigger the 99% of 100,000 interrupts test for disabling
3956 * stray interrupts.
3957 */
3958 ret = IRQ_HANDLED;
3959 iir = new_iir;
3960 } while (iir & ~flip_mask);
3961
3962 i915_update_dri1_breadcrumb(dev);
3963
3964 return ret;
3965 }
3966
3967 static void i915_irq_uninstall(struct drm_device * dev)
3968 {
3969 struct drm_i915_private *dev_priv = dev->dev_private;
3970 int pipe;
3971
3972 if (I915_HAS_HOTPLUG(dev)) {
3973 I915_WRITE(PORT_HOTPLUG_EN, 0);
3974 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3975 }
3976
3977 I915_WRITE16(HWSTAM, 0xffff);
3978 for_each_pipe(dev_priv, pipe) {
3979 /* Clear enable bits; then clear status bits */
3980 I915_WRITE(PIPESTAT(pipe), 0);
3981 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3982 }
3983 I915_WRITE(IMR, 0xffffffff);
3984 I915_WRITE(IER, 0x0);
3985
3986 I915_WRITE(IIR, I915_READ(IIR));
3987 }
3988
3989 static void i965_irq_preinstall(struct drm_device * dev)
3990 {
3991 struct drm_i915_private *dev_priv = dev->dev_private;
3992 int pipe;
3993
3994 I915_WRITE(PORT_HOTPLUG_EN, 0);
3995 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3996
3997 I915_WRITE(HWSTAM, 0xeffe);
3998 for_each_pipe(dev_priv, pipe)
3999 I915_WRITE(PIPESTAT(pipe), 0);
4000 I915_WRITE(IMR, 0xffffffff);
4001 I915_WRITE(IER, 0x0);
4002 POSTING_READ(IER);
4003 }
4004
4005 static int i965_irq_postinstall(struct drm_device *dev)
4006 {
4007 struct drm_i915_private *dev_priv = dev->dev_private;
4008 u32 enable_mask;
4009 u32 error_mask;
4010
4011 /* Unmask the interrupts that we always want on. */
4012 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4013 I915_DISPLAY_PORT_INTERRUPT |
4014 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4015 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4016 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4017 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4018 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4019
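/*
 * Note: the flip pending bits are unmasked in IMR (so they show up in
 * IIR/ISR for i915_handle_vblank() to inspect) but are not enabled in
 * IER below; the pipe event/vblank pipestat interrupts are what actually
 * invoke the handler.
 */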
4020 enable_mask = ~dev_priv->irq_mask;
4021 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4022 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4023 enable_mask |= I915_USER_INTERRUPT;
4024
4025 if (IS_G4X(dev))
4026 enable_mask |= I915_BSD_USER_INTERRUPT;
4027
4028 /* Interrupt setup is already guaranteed to be single-threaded; this is
4029 * just to make the assert_spin_locked check happy. */
4030 spin_lock_irq(&dev_priv->irq_lock);
4031 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4032 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4033 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4034 spin_unlock_irq(&dev_priv->irq_lock);
4035
4036 /*
4037 * Enable some error detection, note the instruction error mask
4038 * bit is reserved, so we leave it masked.
4039 */
4040 if (IS_G4X(dev)) {
4041 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4042 GM45_ERROR_MEM_PRIV |
4043 GM45_ERROR_CP_PRIV |
4044 I915_ERROR_MEMORY_REFRESH);
4045 } else {
4046 error_mask = ~(I915_ERROR_PAGE_TABLE |
4047 I915_ERROR_MEMORY_REFRESH);
4048 }
4049 I915_WRITE(EMR, error_mask);
4050
4051 I915_WRITE(IMR, dev_priv->irq_mask);
4052 I915_WRITE(IER, enable_mask);
4053 POSTING_READ(IER);
4054
4055 I915_WRITE(PORT_HOTPLUG_EN, 0);
4056 POSTING_READ(PORT_HOTPLUG_EN);
4057
4058 i915_enable_asle_pipestat(dev);
4059
4060 return 0;
4061 }
4062
4063 static void i915_hpd_irq_setup(struct drm_device *dev)
4064 {
4065 struct drm_i915_private *dev_priv = dev->dev_private;
4066 struct intel_encoder *intel_encoder;
4067 u32 hotplug_en;
4068
4069 assert_spin_locked(&dev_priv->irq_lock);
4070
4071 if (I915_HAS_HOTPLUG(dev)) {
4072 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4073 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4074 /* Note HDMI and DP share hotplug bits */
4075 /* enable bits are the same for all generations */
4076 for_each_intel_encoder(dev, intel_encoder)
4077 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4078 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4079 /* Programming the CRT detection parameters tends
4080 to generate a spurious hotplug event about three
4081 seconds later. So just do it once.
4082 */
4083 if (IS_G4X(dev))
4084 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4085 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
4086 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4087
4088 /* Ignore TV since it's buggy */
4089 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4090 }
4091 }
4092
4093 static irqreturn_t i965_irq_handler(int irq, void *arg)
4094 {
4095 struct drm_device *dev = arg;
4096 struct drm_i915_private *dev_priv = dev->dev_private;
4097 u32 iir, new_iir;
4098 u32 pipe_stats[I915_MAX_PIPES];
4099 int ret = IRQ_NONE, pipe;
4100 u32 flip_mask =
4101 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4102 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4103
4104 iir = I915_READ(IIR);
4105
4106 for (;;) {
4107 bool irq_received = (iir & ~flip_mask) != 0;
4108 bool blc_event = false;
4109
4110 /* Can't rely on pipestat interrupt bit in iir as it might
4111 * have been cleared after the pipestat interrupt was received.
4112 * It doesn't set the bit in iir again, but it still produces
4113 * interrupts (for non-MSI).
4114 */
4115 spin_lock(&dev_priv->irq_lock);
4116 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4117 i915_handle_error(dev, false,
4118 "Command parser error, iir 0x%08x",
4119 iir);
4120
4121 for_each_pipe(dev_priv, pipe) {
4122 int reg = PIPESTAT(pipe);
4123 pipe_stats[pipe] = I915_READ(reg);
4124
4125 /*
4126 * Clear the PIPE*STAT regs before the IIR
4127 */
4128 if (pipe_stats[pipe] & 0x8000ffff) {
4129 I915_WRITE(reg, pipe_stats[pipe]);
4130 irq_received = true;
4131 }
4132 }
4133 spin_unlock(&dev_priv->irq_lock);
4134
4135 if (!irq_received)
4136 break;
4137
4138 ret = IRQ_HANDLED;
4139
4140 /* Consume port. Then clear IIR or we'll miss events */
4141 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4142 i9xx_hpd_irq_handler(dev);
4143
4144 I915_WRITE(IIR, iir & ~flip_mask);
4145 new_iir = I915_READ(IIR); /* Flush posted writes */
4146
4147 if (iir & I915_USER_INTERRUPT)
4148 notify_ring(dev, &dev_priv->ring[RCS]);
4149 if (iir & I915_BSD_USER_INTERRUPT)
4150 notify_ring(dev, &dev_priv->ring[VCS]);
4151
4152 for_each_pipe(dev_priv, pipe) {
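/* gen4+ keeps a fixed 1:1 plane/pipe mapping, so no FBC swap is needed and the pipe doubles as the plane argument below. */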
4153 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4154 i915_handle_vblank(dev, pipe, pipe, iir))
4155 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4156
4157 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4158 blc_event = true;
4159
4160 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4161 i9xx_pipe_crc_irq_handler(dev, pipe);
4162
4163 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4164 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4165 }
4166
4167 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4168 intel_opregion_asle_intr(dev);
4169
4170 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4171 gmbus_irq_handler(dev);
4172
4173 /* With MSI, interrupts are only generated when iir
4174 * transitions from zero to nonzero. If another bit got
4175 * set while we were handling the existing iir bits, then
4176 * we would never get another interrupt.
4177 *
4178 * This is fine on non-MSI as well, as if we hit this path
4179 * we avoid exiting the interrupt handler only to generate
4180 * another one.
4181 *
4182 * Note that for MSI this could cause a stray interrupt report
4183 * if an interrupt landed in the time between writing IIR and
4184 * the posting read. This should be rare enough to never
4185 * trigger the 99% of 100,000 interrupts test for disabling
4186 * stray interrupts.
4187 */
4188 iir = new_iir;
4189 }
4190
4191 i915_update_dri1_breadcrumb(dev);
4192
4193 return ret;
4194 }
4195
4196 static void i965_irq_uninstall(struct drm_device * dev)
4197 {
4198 struct drm_i915_private *dev_priv = dev->dev_private;
4199 int pipe;
4200
4201 if (!dev_priv)
4202 return;
4203
4204 I915_WRITE(PORT_HOTPLUG_EN, 0);
4205 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4206
4207 I915_WRITE(HWSTAM, 0xffffffff);
4208 for_each_pipe(dev_priv, pipe)
4209 I915_WRITE(PIPESTAT(pipe), 0);
4210 I915_WRITE(IMR, 0xffffffff);
4211 I915_WRITE(IER, 0x0);
4212
4213 for_each_pipe(dev_priv, pipe)
4214 I915_WRITE(PIPESTAT(pipe),
4215 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4216 I915_WRITE(IIR, I915_READ(IIR));
4217 }
4218
4219 static void intel_hpd_irq_reenable_work(struct work_struct *work)
4220 {
4221 struct drm_i915_private *dev_priv =
4222 container_of(work, typeof(*dev_priv),
4223 hotplug_reenable_work.work);
4224 struct drm_device *dev = dev_priv->dev;
4225 struct drm_mode_config *mode_config = &dev->mode_config;
4226 int i;
4227
4228 intel_runtime_pm_get(dev_priv);
4229
4230 spin_lock_irq(&dev_priv->irq_lock);
4231 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4232 struct drm_connector *connector;
4233
4234 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
4235 continue;
4236
4237 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4238
4239 list_for_each_entry(connector, &mode_config->connector_list, head) {
4240 struct intel_connector *intel_connector = to_intel_connector(connector);
4241
4242 if (intel_connector->encoder->hpd_pin == i) {
4243 if (connector->polled != intel_connector->polled)
4244 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
4245 connector->name);
4246 connector->polled = intel_connector->polled;
4247 if (!connector->polled)
4248 connector->polled = DRM_CONNECTOR_POLL_HPD;
4249 }
4250 }
4251 }
4252 if (dev_priv->display.hpd_irq_setup)
4253 dev_priv->display.hpd_irq_setup(dev);
4254 spin_unlock_irq(&dev_priv->irq_lock);
4255
4256 intel_runtime_pm_put(dev_priv);
4257 }
4258
4259 /**
4260 * intel_irq_init - initializes irq support
4261 * @dev_priv: i915 device instance
4262 *
4263 * This function initializes all the irq support including work items, timers
4264 * and all the vtables. It does not setup the interrupt itself though.
4265 */
4266 void intel_irq_init(struct drm_i915_private *dev_priv)
4267 {
4268 struct drm_device *dev = dev_priv->dev;
4269
4270 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4271 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
4272 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
4273 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4274 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4275
4276 /* Let's track the enabled rps events */
4277 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4278 /* WaGsvRC0ResidencyMethod:vlv */
4279 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4280 else
4281 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4282
4283 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
4284 i915_hangcheck_elapsed,
4285 (unsigned long) dev);
4286 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
4287 intel_hpd_irq_reenable_work);
4288
4289 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4290
4291 if (IS_GEN2(dev_priv)) {
4292 dev->max_vblank_count = 0;
4293 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4294 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4295 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4296 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4297 } else {
4298 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4299 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4300 }
4301
4302 /*
4303 * Opt out of the vblank disable timer on everything except gen2.
4304 * Gen2 doesn't have a hardware frame counter and so depends on
4305 * vblank interrupts to produce sane vblank sequence numbers.
4306 */
4307 if (!IS_GEN2(dev_priv))
4308 dev->vblank_disable_immediate = true;
4309
4310 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
4311 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4312 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4313 }
4314
4315 if (IS_CHERRYVIEW(dev_priv)) {
4316 dev->driver->irq_handler = cherryview_irq_handler;
4317 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4318 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4319 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4320 dev->driver->enable_vblank = valleyview_enable_vblank;
4321 dev->driver->disable_vblank = valleyview_disable_vblank;
4322 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4323 } else if (IS_VALLEYVIEW(dev_priv)) {
4324 dev->driver->irq_handler = valleyview_irq_handler;
4325 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4326 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4327 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4328 dev->driver->enable_vblank = valleyview_enable_vblank;
4329 dev->driver->disable_vblank = valleyview_disable_vblank;
4330 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4331 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4332 dev->driver->irq_handler = gen8_irq_handler;
4333 dev->driver->irq_preinstall = gen8_irq_reset;
4334 dev->driver->irq_postinstall = gen8_irq_postinstall;
4335 dev->driver->irq_uninstall = gen8_irq_uninstall;
4336 dev->driver->enable_vblank = gen8_enable_vblank;
4337 dev->driver->disable_vblank = gen8_disable_vblank;
4338 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4339 } else if (HAS_PCH_SPLIT(dev)) {
4340 dev->driver->irq_handler = ironlake_irq_handler;
4341 dev->driver->irq_preinstall = ironlake_irq_reset;
4342 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4343 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4344 dev->driver->enable_vblank = ironlake_enable_vblank;
4345 dev->driver->disable_vblank = ironlake_disable_vblank;
4346 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4347 } else {
4348 if (INTEL_INFO(dev_priv)->gen == 2) {
4349 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4350 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4351 dev->driver->irq_handler = i8xx_irq_handler;
4352 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4353 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4354 dev->driver->irq_preinstall = i915_irq_preinstall;
4355 dev->driver->irq_postinstall = i915_irq_postinstall;
4356 dev->driver->irq_uninstall = i915_irq_uninstall;
4357 dev->driver->irq_handler = i915_irq_handler;
4358 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4359 } else {
4360 dev->driver->irq_preinstall = i965_irq_preinstall;
4361 dev->driver->irq_postinstall = i965_irq_postinstall;
4362 dev->driver->irq_uninstall = i965_irq_uninstall;
4363 dev->driver->irq_handler = i965_irq_handler;
4364 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4365 }
4366 dev->driver->enable_vblank = i915_enable_vblank;
4367 dev->driver->disable_vblank = i915_disable_vblank;
4368 }
4369 }
4370
4371 /**
4372 * intel_hpd_init - initializes and enables hpd support
4373 * @dev_priv: i915 device instance
4374 *
4375 * This function enables the hotplug support. It requires that interrupts have
4376 * already been enabled with intel_irq_install(). From this point on hotplug and
4377 * poll requests can run concurrently with other code, so locking rules must be
4378 * obeyed.
4379 *
4380 * This is a separate step from interrupt enabling to simplify the locking rules
4381 * in the driver load and resume code.
4382 */
4383 void intel_hpd_init(struct drm_i915_private *dev_priv)
4384 {
4385 struct drm_device *dev = dev_priv->dev;
4386 struct drm_mode_config *mode_config = &dev->mode_config;
4387 struct drm_connector *connector;
4388 int i;
4389
4390 for (i = 1; i < HPD_NUM_PINS; i++) {
4391 dev_priv->hpd_stats[i].hpd_cnt = 0;
4392 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4393 }
4394 list_for_each_entry(connector, &mode_config->connector_list, head) {
4395 struct intel_connector *intel_connector = to_intel_connector(connector);
4396 connector->polled = intel_connector->polled;
4397 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4398 connector->polled = DRM_CONNECTOR_POLL_HPD;
4399 if (intel_connector->mst_port)
4400 connector->polled = DRM_CONNECTOR_POLL_HPD;
4401 }
4402
4403 /* Interrupt setup is already guaranteed to be single-threaded; this is
4404 * just to make the assert_spin_locked checks happy. */
4405 spin_lock_irq(&dev_priv->irq_lock);
4406 if (dev_priv->display.hpd_irq_setup)
4407 dev_priv->display.hpd_irq_setup(dev);
4408 spin_unlock_irq(&dev_priv->irq_lock);
4409 }
4410
4411 /**
4412 * intel_irq_install - enables the hardware interrupt
4413 * @dev_priv: i915 device instance
4414 *
4415 * This function enables the hardware interrupt handling, but leaves the hotplug
4416 * handling still disabled. It is called after intel_irq_init().
4417 *
4418 * In the driver load and resume code we need working interrupts in a few places
4419 * but don't want to deal with the hassle of concurrent probe and hotplug
4420 * workers. Hence the split into two stages.
4421 */
4422 int intel_irq_install(struct drm_i915_private *dev_priv)
4423 {
4424 /*
4425 * We enable some interrupt sources in our postinstall hooks, so mark
4426 * interrupts as enabled _before_ actually enabling them to avoid
4427 * special cases in our ordering checks.
4428 */
4429 dev_priv->pm.irqs_enabled = true;
4430
4431 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4432 }
4433
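/*
 * For reference, the bring-up order implied by the kernel-docs in this file
 * is roughly:
 *
 *	intel_irq_init(dev_priv);	- work items, timers, vtables
 *	intel_irq_install(dev_priv);	- request and enable the interrupt
 *	intel_hpd_init(dev_priv);	- finally enable hotplug handling
 */
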
4434 /**
4435 * intel_irq_uninstall - finalizes all irq handling
4436 * @dev_priv: i915 device instance
4437 *
4438 * This stops interrupt and hotplug handling and unregisters and frees all
4439 * resources acquired in the init functions.
4440 */
4441 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4442 {
4443 drm_irq_uninstall(dev_priv->dev);
4444 intel_hpd_cancel_work(dev_priv);
4445 dev_priv->pm.irqs_enabled = false;
4446 }
4447
4448 /**
4449 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4450 * @dev_priv: i915 device instance
4451 *
4452 * This function is used to disable interrupts at runtime, both in the runtime
4453 * pm and the system suspend/resume code.
4454 */
4455 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4456 {
4457 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4458 dev_priv->pm.irqs_enabled = false;
4459 }
4460
4461 /**
4462 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4463 * @dev_priv: i915 device instance
4464 *
4465 * This function is used to enable interrupts at runtime, both in the runtime
4466 * pm and the system suspend/resume code.
4467 */
4468 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4469 {
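/* As in intel_irq_install(), mark interrupts as enabled before the actual reinstall so the postinstall hook's ordering checks are happy. */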
4470 dev_priv->pm.irqs_enabled = true;
4471 dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4472 dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4473 }