drm/i915: (re)init HPD interrupt storm statistics
drivers/gpu/drm/i915/i915_irq.c
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i965[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
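
/*
 * Note: the tables above are indexed by enum hpd_pin; hotplug_irq_storm_detect()
 * below walks them to map a raw hotplug trigger bit back to the pin that
 * fired, so each entry must be the platform's interrupt bit for that pin.
 */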

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
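
/*
 * Note on the two helpers above: PIPESTAT keeps the interrupt enable bits
 * in its high half and the corresponding (write-1-to-clear) status bits in
 * the low half, which is why reads are masked with 0x7fff0000 and an enable
 * writes mask | (mask >> 16) to arm the event while acking stale status.
 */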

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw. Note that here a smaller delay
	 * value means a higher frequency, so ips.max_delay is the lower
	 * numerical bound that a busy GPU steps the delay down towards.
	 */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->gpu_error.hangcheck_count = 0;
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}

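/*
 * gen6_queue_rps_work - mask further PM interrupts and defer the actual
 * frequency change to process context via dev_priv->rps.work; the update
 * itself takes dev_priv->rps.hw_lock, a mutex that cannot be taken from
 * the interrupt handler.
 */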
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * indicates a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it indicates a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
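
/*
 * A pin that raises more than HPD_STORM_THRESHOLD hotplug interrupts within
 * HPD_STORM_DETECT_PERIOD ms is treated as storming and gets marked
 * HPD_MARK_DISABLED; a quiet period restarts its count (see below).
 */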

static inline void hotplug_irq_storm_detect(struct drm_device *dev,
					    u32 hotplug_trigger,
					    const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
		}
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

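/*
 * GMBUS and DP AUX completions share gmbus_wait_queue; both handlers just
 * wake all waiters, which are expected to re-check their own hardware
 * status.
 */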
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger) {
		hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger) {
		hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}

static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	struct intel_ring_buffer *ring;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };
	int i, ret;

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
				   reset_event);

		ret = i915_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev.kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set(&error->reset_counter, I915_WEDGED);
		}

		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);

		intel_display_handle_reset(dev);

		wake_up_all(&dev_priv->gpu_error.reset_queue);
	}
}

/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch(INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
		/* fall through: report the gen7 registers for unknown gens */
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}
1076
3bd3c932 1077#ifdef CONFIG_DEBUG_FS
9df30794 1078static struct drm_i915_error_object *
d0d045e8
BW
1079i915_error_object_create_sized(struct drm_i915_private *dev_priv,
1080 struct drm_i915_gem_object *src,
1081 const int num_pages)
9df30794
CW
1082{
1083 struct drm_i915_error_object *dst;
d0d045e8 1084 int i;
e56660dd 1085 u32 reloc_offset;
9df30794 1086
05394f39 1087 if (src == NULL || src->pages == NULL)
9df30794
CW
1088 return NULL;
1089
d0d045e8 1090 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
9df30794
CW
1091 if (dst == NULL)
1092 return NULL;
1093
05394f39 1094 reloc_offset = src->gtt_offset;
d0d045e8 1095 for (i = 0; i < num_pages; i++) {
788885ae 1096 unsigned long flags;
e56660dd 1097 void *d;
788885ae 1098
e56660dd 1099 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
9df30794
CW
1100 if (d == NULL)
1101 goto unwind;
e56660dd 1102
788885ae 1103 local_irq_save(flags);
5d4545ae 1104 if (reloc_offset < dev_priv->gtt.mappable_end &&
74898d7e 1105 src->has_global_gtt_mapping) {
172975aa
CW
1106 void __iomem *s;
1107
1108 /* Simply ignore tiling or any overlapping fence.
1109 * It's part of the error state, and this hopefully
1110 * captures what the GPU read.
1111 */
1112
5d4545ae 1113 s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
172975aa
CW
1114 reloc_offset);
1115 memcpy_fromio(d, s, PAGE_SIZE);
1116 io_mapping_unmap_atomic(s);
960e3564
CW
1117 } else if (src->stolen) {
1118 unsigned long offset;
1119
1120 offset = dev_priv->mm.stolen_base;
1121 offset += src->stolen->start;
1122 offset += i << PAGE_SHIFT;
1123
1a240d4d 1124 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
172975aa 1125 } else {
9da3da66 1126 struct page *page;
172975aa
CW
1127 void *s;
1128
9da3da66 1129 page = i915_gem_object_get_page(src, i);
172975aa 1130
9da3da66
CW
1131 drm_clflush_pages(&page, 1);
1132
1133 s = kmap_atomic(page);
172975aa
CW
1134 memcpy(d, s, PAGE_SIZE);
1135 kunmap_atomic(s);
1136
9da3da66 1137 drm_clflush_pages(&page, 1);
172975aa 1138 }
788885ae 1139 local_irq_restore(flags);
e56660dd 1140
9da3da66 1141 dst->pages[i] = d;
e56660dd
CW
1142
1143 reloc_offset += PAGE_SIZE;
9df30794 1144 }
d0d045e8 1145 dst->page_count = num_pages;
05394f39 1146 dst->gtt_offset = src->gtt_offset;
9df30794
CW
1147
1148 return dst;
1149
1150unwind:
9da3da66
CW
1151 while (i--)
1152 kfree(dst->pages[i]);
9df30794
CW
1153 kfree(dst);
1154 return NULL;
1155}
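
/*
 * Note: error capture can run from interrupt context, hence the GFP_ATOMIC
 * allocations, kmap_atomic()/io_mapping_map_atomic_wc() and the
 * local_irq_save() around the copies above.
 */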

#define i915_error_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), \
				       (src)->base.size>>PAGE_SHIFT)

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
		/* fall through: gen3 also has the first 8 fence registers */
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}
1278
bcfb2e28
CW
1279static struct drm_i915_error_object *
1280i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1281 struct intel_ring_buffer *ring)
1282{
1283 struct drm_i915_gem_object *obj;
1284 u32 seqno;
1285
1286 if (!ring->get_seqno)
1287 return NULL;
1288
b45305fc
DV
1289 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1290 u32 acthd = I915_READ(ACTHD);
1291
1292 if (WARN_ON(ring->id != RCS))
1293 return NULL;
1294
1295 obj = ring->private;
1296 if (acthd >= obj->gtt_offset &&
1297 acthd < obj->gtt_offset + obj->base.size)
1298 return i915_error_object_create(dev_priv, obj);
1299 }
1300
b2eadbc8 1301 seqno = ring->get_seqno(ring, false);
bcfb2e28
CW
1302 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1303 if (obj->ring != ring)
1304 continue;
1305
0201f1ec 1306 if (i915_seqno_passed(seqno, obj->last_read_seqno))
bcfb2e28
CW
1307 continue;
1308
1309 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1310 continue;
1311
1312 /* We need to copy these to an anonymous buffer as the simplest
1313 * method to avoid being overwritten by userspace.
1314 */
1315 return i915_error_object_create(dev_priv, obj);
1316 }
1317
1318 return NULL;
1319}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}

static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
			ering->ctx = i915_error_object_create_sized(dev_priv,
								    obj, 1);
		}
	}
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	if (!HAS_PCH_SPLIT(dev))
		for_each_pipe(pipe)
			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
1666
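/*
 * Illustrative sketch (not part of the driver flow): the write-to-clear
 * pattern used above.  Error bits in EIR are cleared by writing them back;
 * any bit that survives the write is "stuck" and must be masked in EMR so
 * it cannot keep re-raising the error interrupt.  The helper name is
 * hypothetical and exists only for this example.
 */
static void __always_unused example_clear_or_mask_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);

	I915_WRITE(EIR, eir);		/* write-one-to-clear */
	POSTING_READ(EIR);		/* flush the posted write */

	eir = I915_READ(EIR);		/* anything still set is stuck */
	if (eir)
		I915_WRITE(EMR, I915_READ(EMR) | eir);
}
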
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work item
		 * doesn't deadlock trying to grab various locks.
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}

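/*
 * Illustrative sketch of the ordering that matters above: the
 * reset-in-progress bit is set first and the ring waitqueues are woken
 * afterwards, so any waiter that observes the wakeup also observes the
 * flag and can bail out instead of going back to sleep.  Hypothetical
 * helper, shown only to make that ordering explicit.
 */
static void __always_unused example_mark_wedged_and_wake(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
			&dev_priv->gpu_error.reset_counter);	/* flag first */

	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);			/* then wake */
}
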
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

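/*
 * Illustrative sketch: the common pattern shared by the *_enable_vblank()
 * callbacks above.  The interrupt mask registers here are "1 = masked",
 * so enabling a source means clearing its bit, and the update must happen
 * under irq_lock because the interrupt handler and other callers touch
 * the same cached mask.  The helper and its 'bit' parameter are
 * hypothetical.
 */
static void __always_unused example_unmask_under_lock(struct drm_device *dev,
						      u32 bit)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->irq_mask &= ~bit;		/* 0 = delivered */
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);			/* flush before unlocking */
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
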
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring, false),
			      ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  ring->name);
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}
	return false;
}

static bool semaphore_passed(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
	struct intel_ring_buffer *signaller;
	u32 cmd, ipehr, acthd_min;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if ((ipehr & ~(0x3 << 16)) !=
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
		return false;

	/* ACTHD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX.
	 */
	acthd_min = max((int)acthd - 3 * 4, 0);
	do {
		cmd = ioread32(ring->virtual_start + acthd);
		if (cmd == ipehr)
			break;

		acthd -= 4;
		if (acthd < acthd_min)
			return false;
	} while (1);

	signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
	return i915_seqno_passed(signaller->get_seqno(signaller, false),
				 ioread32(ring->virtual_start+acthd+4)+1);
}

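/*
 * Illustrative sketch of the backwards scan performed above: ACTHD points
 * at (or just past) the instruction being executed, so to find the
 * MI_SEMAPHORE_MBOX dword we step back one dword at a time, bounded to a
 * few dwords so a garbage ACTHD cannot send us walking off the ring.
 * Hypothetical helper; 'expected' plays the role of IPEHR, and signed
 * arithmetic avoids unsigned wrap-around at offset zero.
 */
static bool __always_unused example_scan_back_for_cmd(struct intel_ring_buffer *ring,
						      u32 acthd, u32 expected)
{
	int offset = acthd;
	int offset_min = max(offset - 3 * 4, 0);

	for (; offset >= offset_min; offset -= 4) {
		if (ioread32(ring->virtual_start + offset) == expected)
			return true;	/* found the matching dword */
	}
	return false;
}
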
static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}

	if (INTEL_INFO(dev)->gen >= 6 &&
	    tmp & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(ring)) {
		DRM_ERROR("Kicking stuck semaphore on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}

static bool i915_hangcheck_hung(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->gpu_error.hangcheck_count++ > 1) {
		bool hung = true;

		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);

		if (!IS_GEN2(dev)) {
			struct intel_ring_buffer *ring;
			int i;

			/* Is the chip hanging on a WAIT_FOR_EVENT?
			 * If so we can simply poke the RB_WAIT bit
			 * and break the hang. This should work on
			 * all but the second generation chipsets.
			 */
			for_each_ring(ring, dev_priv, i)
				hung &= !kick_ring(ring);
		}

		return hung;
	}

	return false;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
	struct intel_ring_buffer *ring;
	bool err = false, idle;
	int i;

	if (!i915_enable_hangcheck)
		return;

	memset(acthd, 0, sizeof(acthd));
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		idle &= i915_hangcheck_ring_idle(ring, &err);
		acthd[i] = intel_ring_get_active_head(ring);
	}

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (idle) {
		if (err) {
			if (i915_hangcheck_hung(dev))
				return;

			goto repeat;
		}

		dev_priv->gpu_error.hangcheck_count = 0;
		return;
	}

	i915_get_extra_instdone(dev, instdone);
	if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
		   sizeof(acthd)) == 0 &&
	    memcmp(dev_priv->gpu_error.prev_instdone, instdone,
		   sizeof(instdone)) == 0) {
		if (i915_hangcheck_hung(dev))
			return;
	} else {
		dev_priv->gpu_error.hangcheck_count = 0;

		memcpy(dev_priv->gpu_error.last_acthd, acthd,
		       sizeof(acthd));
		memcpy(dev_priv->gpu_error.prev_instdone, instdone,
		       sizeof(instdone));
	}

repeat:
	/* Reset the timer in case the chip hangs without another request being added */
	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}

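/*
 * Illustrative sketch of the hangcheck idea used above: sample a progress
 * register each timer tick and only treat the hardware as potentially hung
 * when two consecutive samples are identical.  A single stale sample proves
 * nothing - the GPU may simply be chewing on a long batch - so the decision
 * needs at least two timer periods with no movement.  Hypothetical helper
 * and caller-owned state.
 */
static bool __always_unused example_no_progress(struct intel_ring_buffer *ring,
						u32 *last_acthd)
{
	u32 acthd = intel_ring_get_active_head(ring);
	bool stuck = (acthd == *last_acthd);

	*last_acthd = acthd;	/* remember for the next tick */
	return stuck;
}
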
/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	if (HAS_PCH_NOP(dev))
		return;

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 mask = ~I915_READ(SDEIMR);
	u32 hotplug;

	if (HAS_PCH_IBX(dev)) {
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			mask |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			mask |= hpd_cpt[intel_encoder->hpd_pin];
	}

	I915_WRITE(SDEIMR, ~mask);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

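/*
 * Illustrative sketch: building an unmask value from the encoder list, as
 * ibx_hpd_irq_setup() does above.  Each registered encoder contributes the
 * SDEIMR bit of its hotplug pin, looked up in one of the hpd_* tables at
 * the top of this file.  Hypothetical helper; 'hpd_table' stands for
 * hpd_ibx[] or hpd_cpt[].
 */
static u32 __always_unused example_collect_hpd_bits(struct drm_device *dev,
						    const u32 *hpd_table)
{
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 mask = 0;

	list_for_each_entry(intel_encoder, &mode_config->encoder_list,
			    base.head)
		mask |= hpd_table[intel_encoder->hpd_pin];

	return mask;	/* bits to clear in SDEIMR */
}
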
static void ibx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 mask;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	if (HAS_PCH_NOP(dev))
		return;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
			   DE_AUX_CHANNEL_A;
	u32 render_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}

static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB |
		DE_AUX_CHANNEL_A_IVB;
	u32 render_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u32 render_irqs;
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/* Hack for broken MSIs on VLV */
	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
	pci_read_config_word(dev->pdev, 0x98, &msid);
	msid &= 0xff; /* mask out delivery bits */
	msid |= (1<<14);
	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	if (HAS_PCH_NOP(dev))
		return;

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int pipe, u16 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, pipe);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

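/*
 * Illustrative sketch of the FlipDone test used above: the flip is
 * complete once the IIR snapshot still carries the pending bit we latched
 * at flip time but the live ISR no longer does, i.e. the 1 -> 0
 * transition happened since the MI_DISPLAY_FLIP was issued.  Hypothetical
 * helper.
 */
static bool __always_unused example_flip_done(struct drm_device *dev,
					      u16 iir, u16 flip_pending)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return (iir & flip_pending) &&		/* was pending at the IRQ */
	       (I915_READ16(ISR) & flip_pending) == 0; /* no longer is */
}
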
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int irq_received;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 0, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 1, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	intel_opregion_enable_asle(dev);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

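/*
 * Illustrative sketch of the IIR loop structure shared by the interrupt
 * handlers in this file.  With MSI, a new message is only generated on
 * IIR's 0 -> 1 transition, so the handler must ack what it saw and then
 * re-read IIR: if new bits arrived while we were processing, we handle
 * them in the same invocation instead of waiting for an edge that will
 * never come.  Hypothetical helper; real handlers also track flip_mask
 * and pipe statistics.
 */
static void __always_unused example_iir_loop(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;

	iir = I915_READ(IIR);
	while (iir) {
		I915_WRITE(IIR, iir);		/* ack what we have seen */
		new_iir = I915_READ(IIR);	/* flush + pick up stragglers */

		/* ... process the 'iir' bits here ... */

		iir = new_iir;
	}
}
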
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	intel_opregion_enable_asle(dev);

	return 0;
}

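/*
 * Illustrative note on the three mask levels configured above, as a
 * sketch: EMR selects which error conditions may set the master error
 * bit, IMR selects which IIR bits may raise the interrupt line, and IER
 * gates delivery as a whole - an event has to pass all three to reach
 * the handler.  Hypothetical helper; the exact EMR semantics here are an
 * assumption for illustration.
 */
static void __always_unused example_mask_layering(struct drm_device *dev,
						  u32 error_bits, u32 irq_bits)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE(EMR, ~error_bits);	/* errors allowed to report */
	I915_WRITE(IMR, ~irq_bits);	/* IIR bits allowed through */
	I915_WRITE(IER, irq_bits);	/* and actually delivered */
	POSTING_READ(IER);
}
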
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 hotplug_en;

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
			hotplug_en |= hpd_mask_i915[encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later. So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I965);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				hotplug_irq_storm_detect(dev, hotplug_trigger,
							 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
}
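
/*
 * Illustrative sketch of what the per-pin statistics (re)initialised
 * above feed into: a storm detector that counts hotplug events per pin
 * and marks a pin for disabling once the count passes a threshold.  The
 * threshold, the HPD_MARK_DISABLED state name and the helper itself are
 * assumptions for illustration; the real logic lives in
 * hotplug_irq_storm_detect().
 */
static bool __always_unused example_hpd_storm(struct drm_i915_private *dev_priv,
					      int pin)
{
	if (dev_priv->hpd_stats[pin].hpd_mark != HPD_ENABLED)
		return true;			/* already shut off */

	if (++dev_priv->hpd_stats[pin].hpd_cnt > 5) {	/* threshold assumed */
		dev_priv->hpd_stats[pin].hpd_mark = HPD_MARK_DISABLED; /* assumed name */
		return true;
	}
	return false;
}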