DRM/I915: Add enum hpd_pin to intel_encoder.
deliverable/linux.git: drivers/gpu/drm/i915/i915_irq.c
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
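
/*
 * Each PIPESTAT register packs the per-pipe interrupt enable bits in its
 * high word and the corresponding status bits in its low word, which is
 * why the helpers above preserve bits 30:16 on read and write back
 * (mask | mask >> 16) to arm an interrupt while acking any stale status.
 */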

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
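
/*
 * Parts with the PIPE_FRMCOUNT_GM45 register expose the frame count
 * directly, while the older path above has to stitch the value together
 * from the split high/low frame count fields, retrying until the high
 * word is stable across the low-word read.
 */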

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->gpu_error.hangcheck_count = 0;
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}
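
/*
 * Every ring interrupt serviced by notify_ring() above both wakes any
 * waiters sleeping on ring->irq_queue and, when hangcheck is enabled,
 * resets the hangcheck counter and pushes the hangcheck timer out by
 * another DRM_I915_HANGCHECK_JIFFIES, so the timer only fires once the
 * GPU has stopped signalling completions for that long.
 */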

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
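
/*
 * The L3 parity interrupt stays masked from the moment it fires (above)
 * until ivybridge_parity_work() has read and cleared GEN7_L3CDERRST1 and
 * notified userspace; the work item then clears the mask bit in
 * gt_irq_mask again so the next parity error can be reported.
 */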

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK_CPT)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}

static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}
880
8a905236
JB
881/**
882 * i915_error_work_func - do process context error handling work
883 * @work: work struct
884 *
885 * Fire an error uevent so userspace can see that a hang or error
886 * was detected.
887 */
888static void i915_error_work_func(struct work_struct *work)
889{
1f83fee0
DV
890 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
891 work);
892 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
893 gpu_error);
8a905236 894 struct drm_device *dev = dev_priv->dev;
f69061be 895 struct intel_ring_buffer *ring;
f316a42c
BG
896 char *error_event[] = { "ERROR=1", NULL };
897 char *reset_event[] = { "RESET=1", NULL };
898 char *reset_done_event[] = { "ERROR=0", NULL };
f69061be 899 int i, ret;
8a905236 900
f316a42c
BG
901 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
902
7db0ba24
DV
903 /*
904 * Note that there's only one work item which does gpu resets, so we
905 * need not worry about concurrent gpu resets potentially incrementing
906 * error->reset_counter twice. We only need to take care of another
907 * racing irq/hangcheck declaring the gpu dead for a second time. A
908 * quick check for that is good enough: schedule_work ensures the
909 * correct ordering between hang detection and this work item, and since
910 * the reset in-progress bit is only ever set by code outside of this
911 * work we don't need to worry about any other races.
912 */
913 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
f803aa55 914 DRM_DEBUG_DRIVER("resetting chip\n");
7db0ba24
DV
915 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
916 reset_event);
1f83fee0 917
f69061be
DV
918 ret = i915_reset(dev);
919
920 if (ret == 0) {
921 /*
922 * After all the gem state is reset, increment the reset
923 * counter and wake up everyone waiting for the reset to
924 * complete.
925 *
926 * Since unlock operations are a one-sided barrier only,
927 * we need to insert a barrier here to order any seqno
928 * updates before
929 * the counter increment.
930 */
931 smp_mb__before_atomic_inc();
932 atomic_inc(&dev_priv->gpu_error.reset_counter);
933
934 kobject_uevent_env(&dev->primary->kdev.kobj,
935 KOBJ_CHANGE, reset_done_event);
1f83fee0
DV
936 } else {
937 atomic_set(&error->reset_counter, I915_WEDGED);
f316a42c 938 }
1f83fee0 939
f69061be
DV
940 for_each_ring(ring, dev_priv, i)
941 wake_up_all(&ring->irq_queue);
942
96a02917
VS
943 intel_display_handle_reset(dev);
944
1f83fee0 945 wake_up_all(&dev_priv->gpu_error.reset_queue);
f316a42c 946 }
8a905236
JB
947}
948
85f9e50d
DV
949/* NB: please notice the memset */
950static void i915_get_extra_instdone(struct drm_device *dev,
951 uint32_t *instdone)
952{
953 struct drm_i915_private *dev_priv = dev->dev_private;
954 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
955
956 switch(INTEL_INFO(dev)->gen) {
957 case 2:
958 case 3:
959 instdone[0] = I915_READ(INSTDONE);
960 break;
961 case 4:
962 case 5:
963 case 6:
964 instdone[0] = I915_READ(INSTDONE_I965);
965 instdone[1] = I915_READ(INSTDONE1);
966 break;
967 default:
968 WARN_ONCE(1, "Unsupported platform\n");
969 case 7:
970 instdone[0] = I915_READ(GEN7_INSTDONE_1);
971 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
972 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
973 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
974 break;
975 }
976}
977
3bd3c932 978#ifdef CONFIG_DEBUG_FS
9df30794 979static struct drm_i915_error_object *
d0d045e8
BW
980i915_error_object_create_sized(struct drm_i915_private *dev_priv,
981 struct drm_i915_gem_object *src,
982 const int num_pages)
9df30794
CW
983{
984 struct drm_i915_error_object *dst;
d0d045e8 985 int i;
e56660dd 986 u32 reloc_offset;
9df30794 987
05394f39 988 if (src == NULL || src->pages == NULL)
9df30794
CW
989 return NULL;
990
d0d045e8 991 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
9df30794
CW
992 if (dst == NULL)
993 return NULL;
994
05394f39 995 reloc_offset = src->gtt_offset;
d0d045e8 996 for (i = 0; i < num_pages; i++) {
788885ae 997 unsigned long flags;
e56660dd 998 void *d;
788885ae 999
e56660dd 1000 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
9df30794
CW
1001 if (d == NULL)
1002 goto unwind;
e56660dd 1003
788885ae 1004 local_irq_save(flags);
5d4545ae 1005 if (reloc_offset < dev_priv->gtt.mappable_end &&
74898d7e 1006 src->has_global_gtt_mapping) {
172975aa
CW
1007 void __iomem *s;
1008
1009 /* Simply ignore tiling or any overlapping fence.
1010 * It's part of the error state, and this hopefully
1011 * captures what the GPU read.
1012 */
1013
5d4545ae 1014 s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
172975aa
CW
1015 reloc_offset);
1016 memcpy_fromio(d, s, PAGE_SIZE);
1017 io_mapping_unmap_atomic(s);
960e3564
CW
1018 } else if (src->stolen) {
1019 unsigned long offset;
1020
1021 offset = dev_priv->mm.stolen_base;
1022 offset += src->stolen->start;
1023 offset += i << PAGE_SHIFT;
1024
1a240d4d 1025 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
172975aa 1026 } else {
9da3da66 1027 struct page *page;
172975aa
CW
1028 void *s;
1029
9da3da66 1030 page = i915_gem_object_get_page(src, i);
172975aa 1031
9da3da66
CW
1032 drm_clflush_pages(&page, 1);
1033
1034 s = kmap_atomic(page);
172975aa
CW
1035 memcpy(d, s, PAGE_SIZE);
1036 kunmap_atomic(s);
1037
9da3da66 1038 drm_clflush_pages(&page, 1);
172975aa 1039 }
788885ae 1040 local_irq_restore(flags);
e56660dd 1041
9da3da66 1042 dst->pages[i] = d;
e56660dd
CW
1043
1044 reloc_offset += PAGE_SIZE;
9df30794 1045 }
d0d045e8 1046 dst->page_count = num_pages;
05394f39 1047 dst->gtt_offset = src->gtt_offset;
9df30794
CW
1048
1049 return dst;
1050
1051unwind:
9da3da66
CW
1052 while (i--)
1053 kfree(dst->pages[i]);
9df30794
CW
1054 kfree(dst);
1055 return NULL;
1056}
d0d045e8
BW
1057#define i915_error_object_create(dev_priv, src) \
1058 i915_error_object_create_sized((dev_priv), (src), \
1059 (src)->base.size>>PAGE_SHIFT)
9df30794
CW
1060
1061static void
1062i915_error_object_free(struct drm_i915_error_object *obj)
1063{
1064 int page;
1065
1066 if (obj == NULL)
1067 return;
1068
1069 for (page = 0; page < obj->page_count; page++)
1070 kfree(obj->pages[page]);
1071
1072 kfree(obj);
1073}
1074
742cbee8
DV
1075void
1076i915_error_state_free(struct kref *error_ref)
9df30794 1077{
742cbee8
DV
1078 struct drm_i915_error_state *error = container_of(error_ref,
1079 typeof(*error), ref);
e2f973d5
CW
1080 int i;
1081
52d39a21
CW
1082 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1083 i915_error_object_free(error->ring[i].batchbuffer);
1084 i915_error_object_free(error->ring[i].ringbuffer);
1085 kfree(error->ring[i].requests);
1086 }
e2f973d5 1087
9df30794 1088 kfree(error->active_bo);
6ef3d427 1089 kfree(error->overlay);
9df30794
CW
1090 kfree(error);
1091}
1b50247a
CW
1092static void capture_bo(struct drm_i915_error_buffer *err,
1093 struct drm_i915_gem_object *obj)
1094{
1095 err->size = obj->base.size;
1096 err->name = obj->base.name;
0201f1ec
CW
1097 err->rseqno = obj->last_read_seqno;
1098 err->wseqno = obj->last_write_seqno;
1b50247a
CW
1099 err->gtt_offset = obj->gtt_offset;
1100 err->read_domains = obj->base.read_domains;
1101 err->write_domain = obj->base.write_domain;
1102 err->fence_reg = obj->fence_reg;
1103 err->pinned = 0;
1104 if (obj->pin_count > 0)
1105 err->pinned = 1;
1106 if (obj->user_pin_count > 0)
1107 err->pinned = -1;
1108 err->tiling = obj->tiling_mode;
1109 err->dirty = obj->dirty;
1110 err->purgeable = obj->madv != I915_MADV_WILLNEED;
1111 err->ring = obj->ring ? obj->ring->id : -1;
1112 err->cache_level = obj->cache_level;
1113}
9df30794 1114
1b50247a
CW
1115static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1116 int count, struct list_head *head)
c724e8a9
CW
1117{
1118 struct drm_i915_gem_object *obj;
1119 int i = 0;
1120
1121 list_for_each_entry(obj, head, mm_list) {
1b50247a 1122 capture_bo(err++, obj);
c724e8a9
CW
1123 if (++i == count)
1124 break;
1b50247a
CW
1125 }
1126
1127 return i;
1128}
1129
1130static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1131 int count, struct list_head *head)
1132{
1133 struct drm_i915_gem_object *obj;
1134 int i = 0;
1135
1136 list_for_each_entry(obj, head, gtt_list) {
1137 if (obj->pin_count == 0)
1138 continue;
c724e8a9 1139
1b50247a
CW
1140 capture_bo(err++, obj);
1141 if (++i == count)
1142 break;
c724e8a9
CW
1143 }
1144
1145 return i;
1146}
1147
748ebc60
CW
1148static void i915_gem_record_fences(struct drm_device *dev,
1149 struct drm_i915_error_state *error)
1150{
1151 struct drm_i915_private *dev_priv = dev->dev_private;
1152 int i;
1153
1154 /* Fences */
1155 switch (INTEL_INFO(dev)->gen) {
775d17b6 1156 case 7:
748ebc60
CW
1157 case 6:
1158 for (i = 0; i < 16; i++)
1159 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1160 break;
1161 case 5:
1162 case 4:
1163 for (i = 0; i < 16; i++)
1164 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1165 break;
1166 case 3:
1167 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1168 for (i = 0; i < 8; i++)
1169 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
1170 case 2:
1171 for (i = 0; i < 8; i++)
1172 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1173 break;
1174
7dbf9d6e
BW
1175 default:
1176 BUG();
748ebc60
CW
1177 }
1178}
1179
bcfb2e28
CW
1180static struct drm_i915_error_object *
1181i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1182 struct intel_ring_buffer *ring)
1183{
1184 struct drm_i915_gem_object *obj;
1185 u32 seqno;
1186
1187 if (!ring->get_seqno)
1188 return NULL;
1189
b45305fc
DV
1190 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1191 u32 acthd = I915_READ(ACTHD);
1192
1193 if (WARN_ON(ring->id != RCS))
1194 return NULL;
1195
1196 obj = ring->private;
1197 if (acthd >= obj->gtt_offset &&
1198 acthd < obj->gtt_offset + obj->base.size)
1199 return i915_error_object_create(dev_priv, obj);
1200 }
1201
b2eadbc8 1202 seqno = ring->get_seqno(ring, false);
bcfb2e28
CW
1203 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1204 if (obj->ring != ring)
1205 continue;
1206
0201f1ec 1207 if (i915_seqno_passed(seqno, obj->last_read_seqno))
bcfb2e28
CW
1208 continue;
1209
1210 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1211 continue;
1212
1213 /* We need to copy these to an anonymous buffer as the simplest
1214 * method to avoid being overwritten by userspace.
1215 */
1216 return i915_error_object_create(dev_priv, obj);
1217 }
1218
1219 return NULL;
1220}
1221
d27b1e0e
DV
1222static void i915_record_ring_state(struct drm_device *dev,
1223 struct drm_i915_error_state *error,
1224 struct intel_ring_buffer *ring)
1225{
1226 struct drm_i915_private *dev_priv = dev->dev_private;
1227
33f3f518 1228 if (INTEL_INFO(dev)->gen >= 6) {
12f55818 1229 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
33f3f518 1230 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
7e3b8737
DV
1231 error->semaphore_mboxes[ring->id][0]
1232 = I915_READ(RING_SYNC_0(ring->mmio_base));
1233 error->semaphore_mboxes[ring->id][1]
1234 = I915_READ(RING_SYNC_1(ring->mmio_base));
df2b23d9
CW
1235 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1236 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
33f3f518 1237 }
c1cd90ed 1238
d27b1e0e 1239 if (INTEL_INFO(dev)->gen >= 4) {
9d2f41fa 1240 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
d27b1e0e
DV
1241 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1242 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1243 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
c1cd90ed 1244 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
050ee91f 1245 if (ring->id == RCS)
d27b1e0e 1246 error->bbaddr = I915_READ64(BB_ADDR);
d27b1e0e 1247 } else {
9d2f41fa 1248 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
d27b1e0e
DV
1249 error->ipeir[ring->id] = I915_READ(IPEIR);
1250 error->ipehr[ring->id] = I915_READ(IPEHR);
1251 error->instdone[ring->id] = I915_READ(INSTDONE);
d27b1e0e
DV
1252 }
1253
9574b3fe 1254 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
c1cd90ed 1255 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
b2eadbc8 1256 error->seqno[ring->id] = ring->get_seqno(ring, false);
d27b1e0e 1257 error->acthd[ring->id] = intel_ring_get_active_head(ring);
c1cd90ed
DV
1258 error->head[ring->id] = I915_READ_HEAD(ring);
1259 error->tail[ring->id] = I915_READ_TAIL(ring);
0f3b6849 1260 error->ctl[ring->id] = I915_READ_CTL(ring);
7e3b8737
DV
1261
1262 error->cpu_ring_head[ring->id] = ring->head;
1263 error->cpu_ring_tail[ring->id] = ring->tail;
d27b1e0e
DV
1264}
1265
8c123e54
BW
1266
1267static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1268 struct drm_i915_error_state *error,
1269 struct drm_i915_error_ring *ering)
1270{
1271 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1272 struct drm_i915_gem_object *obj;
1273
1274 /* Currently render ring is the only HW context user */
1275 if (ring->id != RCS || !error->ccid)
1276 return;
1277
1278 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
1279 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1280 ering->ctx = i915_error_object_create_sized(dev_priv,
1281 obj, 1);
1282 }
1283 }
1284}
1285
52d39a21
CW
1286static void i915_gem_record_rings(struct drm_device *dev,
1287 struct drm_i915_error_state *error)
1288{
1289 struct drm_i915_private *dev_priv = dev->dev_private;
b4519513 1290 struct intel_ring_buffer *ring;
52d39a21
CW
1291 struct drm_i915_gem_request *request;
1292 int i, count;
1293
b4519513 1294 for_each_ring(ring, dev_priv, i) {
52d39a21
CW
1295 i915_record_ring_state(dev, error, ring);
1296
1297 error->ring[i].batchbuffer =
1298 i915_error_first_batchbuffer(dev_priv, ring);
1299
1300 error->ring[i].ringbuffer =
1301 i915_error_object_create(dev_priv, ring->obj);
1302
8c123e54
BW
1303
1304 i915_gem_record_active_context(ring, error, &error->ring[i]);
1305
52d39a21
CW
1306 count = 0;
1307 list_for_each_entry(request, &ring->request_list, list)
1308 count++;
1309
1310 error->ring[i].num_requests = count;
1311 error->ring[i].requests =
1312 kmalloc(count*sizeof(struct drm_i915_error_request),
1313 GFP_ATOMIC);
1314 if (error->ring[i].requests == NULL) {
1315 error->ring[i].num_requests = 0;
1316 continue;
1317 }
1318
1319 count = 0;
1320 list_for_each_entry(request, &ring->request_list, list) {
1321 struct drm_i915_error_request *erq;
1322
1323 erq = &error->ring[i].requests[count++];
1324 erq->seqno = request->seqno;
1325 erq->jiffies = request->emitted_jiffies;
ee4f42b1 1326 erq->tail = request->tail;
52d39a21
CW
1327 }
1328 }
1329}
1330
8a905236
JB
1331/**
1332 * i915_capture_error_state - capture an error record for later analysis
1333 * @dev: drm device
1334 *
1335 * Should be called when an error is detected (either a hang or an error
1336 * interrupt) to capture error state from the time of the error. Fills
1337 * out a structure which becomes available in debugfs for user level tools
1338 * to pick up.
1339 */
63eeaf38
JB
1340static void i915_capture_error_state(struct drm_device *dev)
1341{
1342 struct drm_i915_private *dev_priv = dev->dev_private;
05394f39 1343 struct drm_i915_gem_object *obj;
63eeaf38
JB
1344 struct drm_i915_error_state *error;
1345 unsigned long flags;
9db4a9c7 1346 int i, pipe;
63eeaf38 1347
99584db3
DV
1348 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1349 error = dev_priv->gpu_error.first_error;
1350 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
9df30794
CW
1351 if (error)
1352 return;
63eeaf38 1353
9db4a9c7 1354 /* Account for pipe specific data like PIPE*STAT */
33f3f518 1355 error = kzalloc(sizeof(*error), GFP_ATOMIC);
63eeaf38 1356 if (!error) {
9df30794
CW
1357 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1358 return;
63eeaf38
JB
1359 }
1360
5d83d294 1361 DRM_INFO("capturing error event; look for more information in "
2f86f191 1362 "/sys/kernel/debug/dri/%d/i915_error_state\n",
b6f7833b 1363 dev->primary->index);
2fa772f3 1364
742cbee8 1365 kref_init(&error->ref);
63eeaf38
JB
1366 error->eir = I915_READ(EIR);
1367 error->pgtbl_er = I915_READ(PGTBL_ER);
211816ec
BW
1368 if (HAS_HW_CONTEXTS(dev))
1369 error->ccid = I915_READ(CCID);
be998e2e
BW
1370
1371 if (HAS_PCH_SPLIT(dev))
1372 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1373 else if (IS_VALLEYVIEW(dev))
1374 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1375 else if (IS_GEN2(dev))
1376 error->ier = I915_READ16(IER);
1377 else
1378 error->ier = I915_READ(IER);
1379
0f3b6849
CW
1380 if (INTEL_INFO(dev)->gen >= 6)
1381 error->derrmr = I915_READ(DERRMR);
1382
1383 if (IS_VALLEYVIEW(dev))
1384 error->forcewake = I915_READ(FORCEWAKE_VLV);
1385 else if (INTEL_INFO(dev)->gen >= 7)
1386 error->forcewake = I915_READ(FORCEWAKE_MT);
1387 else if (INTEL_INFO(dev)->gen == 6)
1388 error->forcewake = I915_READ(FORCEWAKE);
1389
4f3308b9
PZ
1390 if (!HAS_PCH_SPLIT(dev))
1391 for_each_pipe(pipe)
1392 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
d27b1e0e 1393
33f3f518 1394 if (INTEL_INFO(dev)->gen >= 6) {
f406839f 1395 error->error = I915_READ(ERROR_GEN6);
33f3f518
DV
1396 error->done_reg = I915_READ(DONE_REG);
1397 }
d27b1e0e 1398
71e172e8
BW
1399 if (INTEL_INFO(dev)->gen == 7)
1400 error->err_int = I915_READ(GEN7_ERR_INT);
1401
050ee91f
BW
1402 i915_get_extra_instdone(dev, error->extra_instdone);
1403
748ebc60 1404 i915_gem_record_fences(dev, error);
52d39a21 1405 i915_gem_record_rings(dev, error);
9df30794 1406
c724e8a9 1407 /* Record buffers on the active and pinned lists. */
9df30794 1408 error->active_bo = NULL;
c724e8a9 1409 error->pinned_bo = NULL;
9df30794 1410
bcfb2e28
CW
1411 i = 0;
1412 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1413 i++;
1414 error->active_bo_count = i;
6c085a72 1415 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
1b50247a
CW
1416 if (obj->pin_count)
1417 i++;
bcfb2e28 1418 error->pinned_bo_count = i - error->active_bo_count;
c724e8a9 1419
8e934dbf
CW
1420 error->active_bo = NULL;
1421 error->pinned_bo = NULL;
bcfb2e28
CW
1422 if (i) {
1423 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
9df30794 1424 GFP_ATOMIC);
c724e8a9
CW
1425 if (error->active_bo)
1426 error->pinned_bo =
1427 error->active_bo + error->active_bo_count;
9df30794
CW
1428 }
1429
c724e8a9
CW
1430 if (error->active_bo)
1431 error->active_bo_count =
1b50247a
CW
1432 capture_active_bo(error->active_bo,
1433 error->active_bo_count,
1434 &dev_priv->mm.active_list);
c724e8a9
CW
1435
1436 if (error->pinned_bo)
1437 error->pinned_bo_count =
1b50247a
CW
1438 capture_pinned_bo(error->pinned_bo,
1439 error->pinned_bo_count,
6c085a72 1440 &dev_priv->mm.bound_list);
c724e8a9 1441
9df30794
CW
1442 do_gettimeofday(&error->time);
1443
6ef3d427 1444 error->overlay = intel_overlay_capture_error_state(dev);
c4a1d9e4 1445 error->display = intel_display_capture_error_state(dev);
6ef3d427 1446
99584db3
DV
1447 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1448 if (dev_priv->gpu_error.first_error == NULL) {
1449 dev_priv->gpu_error.first_error = error;
9df30794
CW
1450 error = NULL;
1451 }
99584db3 1452 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
9df30794
CW
1453
1454 if (error)
742cbee8 1455 i915_error_state_free(&error->ref);
9df30794
CW
1456}
1457
1458void i915_destroy_error_state(struct drm_device *dev)
1459{
1460 struct drm_i915_private *dev_priv = dev->dev_private;
1461 struct drm_i915_error_state *error;
6dc0e816 1462 unsigned long flags;
9df30794 1463
99584db3
DV
1464 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1465 error = dev_priv->gpu_error.first_error;
1466 dev_priv->gpu_error.first_error = NULL;
1467 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
9df30794
CW
1468
1469 if (error)
742cbee8 1470 kref_put(&error->ref, i915_error_state_free);
63eeaf38 1471}
3bd3c932
CW
1472#else
1473#define i915_capture_error_state(x)
1474#endif
63eeaf38 1475
35aed2e6 1476static void i915_report_and_clear_eir(struct drm_device *dev)
8a905236
JB
1477{
1478 struct drm_i915_private *dev_priv = dev->dev_private;
bd9854f9 1479 uint32_t instdone[I915_NUM_INSTDONE_REG];
8a905236 1480 u32 eir = I915_READ(EIR);
050ee91f 1481 int pipe, i;
8a905236 1482
35aed2e6
CW
1483 if (!eir)
1484 return;
8a905236 1485
a70491cc 1486 pr_err("render error detected, EIR: 0x%08x\n", eir);
8a905236 1487
bd9854f9
BW
1488 i915_get_extra_instdone(dev, instdone);
1489
8a905236
JB
1490 if (IS_G4X(dev)) {
1491 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1492 u32 ipeir = I915_READ(IPEIR_I965);
1493
a70491cc
JP
1494 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1495 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
050ee91f
BW
1496 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1497 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
a70491cc 1498 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
a70491cc 1499 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
8a905236 1500 I915_WRITE(IPEIR_I965, ipeir);
3143a2bf 1501 POSTING_READ(IPEIR_I965);
8a905236
JB
1502 }
1503 if (eir & GM45_ERROR_PAGE_TABLE) {
1504 u32 pgtbl_err = I915_READ(PGTBL_ER);
a70491cc
JP
1505 pr_err("page table error\n");
1506 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
8a905236 1507 I915_WRITE(PGTBL_ER, pgtbl_err);
3143a2bf 1508 POSTING_READ(PGTBL_ER);
8a905236
JB
1509 }
1510 }
1511
a6c45cf0 1512 if (!IS_GEN2(dev)) {
8a905236
JB
1513 if (eir & I915_ERROR_PAGE_TABLE) {
1514 u32 pgtbl_err = I915_READ(PGTBL_ER);
a70491cc
JP
1515 pr_err("page table error\n");
1516 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
8a905236 1517 I915_WRITE(PGTBL_ER, pgtbl_err);
3143a2bf 1518 POSTING_READ(PGTBL_ER);
8a905236
JB
1519 }
1520 }
1521
1522 if (eir & I915_ERROR_MEMORY_REFRESH) {
a70491cc 1523 pr_err("memory refresh error:\n");
9db4a9c7 1524 for_each_pipe(pipe)
a70491cc 1525 pr_err("pipe %c stat: 0x%08x\n",
9db4a9c7 1526 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
8a905236
JB
1527 /* pipestat has already been acked */
1528 }
1529 if (eir & I915_ERROR_INSTRUCTION) {
a70491cc
JP
1530 pr_err("instruction error\n");
1531 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
050ee91f
BW
1532 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1533 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
a6c45cf0 1534 if (INTEL_INFO(dev)->gen < 4) {
8a905236
JB
1535 u32 ipeir = I915_READ(IPEIR);
1536
a70491cc
JP
1537 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
1538 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
a70491cc 1539 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
8a905236 1540 I915_WRITE(IPEIR, ipeir);
3143a2bf 1541 POSTING_READ(IPEIR);
8a905236
JB
1542 } else {
1543 u32 ipeir = I915_READ(IPEIR_I965);
1544
a70491cc
JP
1545 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1546 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
a70491cc 1547 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
a70491cc 1548 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
8a905236 1549 I915_WRITE(IPEIR_I965, ipeir);
3143a2bf 1550 POSTING_READ(IPEIR_I965);
8a905236
JB
1551 }
1552 }
1553
1554 I915_WRITE(EIR, eir);
3143a2bf 1555 POSTING_READ(EIR);
8a905236
JB
1556 eir = I915_READ(EIR);
1557 if (eir) {
1558 /*
1559 * some errors might have become stuck,
1560 * mask them.
1561 */
1562 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1563 I915_WRITE(EMR, I915_READ(EMR) | eir);
1564 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1565 }
35aed2e6
CW
1566}
1567
1568/**
1569 * i915_handle_error - handle an error interrupt
1570 * @dev: drm device
1571 *
1572 * Do some basic checking of regsiter state at error interrupt time and
1573 * dump it to the syslog. Also call i915_capture_error_state() to make
1574 * sure we get a record and make it available in debugfs. Fire a uevent
1575 * so userspace knows something bad happened (should trigger collection
1576 * of a ring dump etc.).
1577 */
527f9e90 1578void i915_handle_error(struct drm_device *dev, bool wedged)
35aed2e6
CW
1579{
1580 struct drm_i915_private *dev_priv = dev->dev_private;
b4519513
CW
1581 struct intel_ring_buffer *ring;
1582 int i;
35aed2e6
CW
1583
1584 i915_capture_error_state(dev);
1585 i915_report_and_clear_eir(dev);
8a905236 1586
ba1234d1 1587 if (wedged) {
f69061be
DV
1588 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1589 &dev_priv->gpu_error.reset_counter);
ba1234d1 1590
11ed50ec 1591 /*
1f83fee0
DV
1592 * Wakeup waiting processes so that the reset work item
1593 * doesn't deadlock trying to grab various locks.
11ed50ec 1594 */
b4519513
CW
1595 for_each_ring(ring, dev_priv, i)
1596 wake_up_all(&ring->irq_queue);
11ed50ec
BG
1597 }
1598
99584db3 1599 queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
8a905236
JB
1600}
1601
21ad8330 1602static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
4e5359cd
SF
1603{
1604 drm_i915_private_t *dev_priv = dev->dev_private;
1605 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1606 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
05394f39 1607 struct drm_i915_gem_object *obj;
4e5359cd
SF
1608 struct intel_unpin_work *work;
1609 unsigned long flags;
1610 bool stall_detected;
1611
1612 /* Ignore early vblank irqs */
1613 if (intel_crtc == NULL)
1614 return;
1615
1616 spin_lock_irqsave(&dev->event_lock, flags);
1617 work = intel_crtc->unpin_work;
1618
e7d841ca
CW
1619 if (work == NULL ||
1620 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1621 !work->enable_stall_check) {
4e5359cd
SF
1622 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1623 spin_unlock_irqrestore(&dev->event_lock, flags);
1624 return;
1625 }
1626
1627 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
05394f39 1628 obj = work->pending_flip_obj;
a6c45cf0 1629 if (INTEL_INFO(dev)->gen >= 4) {
9db4a9c7 1630 int dspsurf = DSPSURF(intel_crtc->plane);
446f2545
AR
1631 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1632 obj->gtt_offset;
4e5359cd 1633 } else {
9db4a9c7 1634 int dspaddr = DSPADDR(intel_crtc->plane);
05394f39 1635 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
01f2c773 1636 crtc->y * crtc->fb->pitches[0] +
4e5359cd
SF
1637 crtc->x * crtc->fb->bits_per_pixel/8);
1638 }
1639
1640 spin_unlock_irqrestore(&dev->event_lock, flags);
1641
1642 if (stall_detected) {
1643 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1644 intel_prepare_page_flip(dev, intel_crtc->plane);
1645 }
1646}
1647
42f52ef8
KP
1648/* Called from drm generic code, passed 'crtc' which
1649 * we use as a pipe index
1650 */
f71d4af4 1651static int i915_enable_vblank(struct drm_device *dev, int pipe)
0a3e67a4
JB
1652{
1653 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
e9d21d7f 1654 unsigned long irqflags;
71e0ffa5 1655
5eddb70b 1656 if (!i915_pipe_enabled(dev, pipe))
71e0ffa5 1657 return -EINVAL;
0a3e67a4 1658
1ec14ad3 1659 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f796cf8f 1660 if (INTEL_INFO(dev)->gen >= 4)
7c463586
KP
1661 i915_enable_pipestat(dev_priv, pipe,
1662 PIPE_START_VBLANK_INTERRUPT_ENABLE);
e9d21d7f 1663 else
7c463586
KP
1664 i915_enable_pipestat(dev_priv, pipe,
1665 PIPE_VBLANK_INTERRUPT_ENABLE);
8692d00e
CW
1666
1667 /* maintain vblank delivery even in deep C-states */
1668 if (dev_priv->info->gen == 3)
6b26c86d 1669 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1ec14ad3 1670 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
8692d00e 1671
0a3e67a4
JB
1672 return 0;
1673}
1674
f71d4af4 1675static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
f796cf8f
JB
1676{
1677 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1678 unsigned long irqflags;
1679
1680 if (!i915_pipe_enabled(dev, pipe))
1681 return -EINVAL;
1682
1683 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1684 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
0206e353 1685 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
f796cf8f
JB
1686 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1687
1688 return 0;
1689}
1690
f71d4af4 1691static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
b1f14ad0
JB
1692{
1693 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1694 unsigned long irqflags;
1695
1696 if (!i915_pipe_enabled(dev, pipe))
1697 return -EINVAL;
1698
1699 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
b615b57a
CW
1700 ironlake_enable_display_irq(dev_priv,
1701 DE_PIPEA_VBLANK_IVB << (5 * pipe));
b1f14ad0
JB
1702 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1703
1704 return 0;
1705}
1706
7e231dbe
JB
1707static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1708{
1709 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1710 unsigned long irqflags;
31acc7f5 1711 u32 imr;
7e231dbe
JB
1712
1713 if (!i915_pipe_enabled(dev, pipe))
1714 return -EINVAL;
1715
1716 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
7e231dbe 1717 imr = I915_READ(VLV_IMR);
31acc7f5 1718 if (pipe == 0)
7e231dbe 1719 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
31acc7f5 1720 else
7e231dbe 1721 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
7e231dbe 1722 I915_WRITE(VLV_IMR, imr);
31acc7f5
JB
1723 i915_enable_pipestat(dev_priv, pipe,
1724 PIPE_START_VBLANK_INTERRUPT_ENABLE);
7e231dbe
JB
1725 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1726
1727 return 0;
1728}
1729
42f52ef8
KP
1730/* Called from drm generic code, passed 'crtc' which
1731 * we use as a pipe index
1732 */
f71d4af4 1733static void i915_disable_vblank(struct drm_device *dev, int pipe)
0a3e67a4
JB
1734{
1735 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
e9d21d7f 1736 unsigned long irqflags;
0a3e67a4 1737
1ec14ad3 1738 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
8692d00e 1739 if (dev_priv->info->gen == 3)
6b26c86d 1740 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
8692d00e 1741
f796cf8f
JB
1742 i915_disable_pipestat(dev_priv, pipe,
1743 PIPE_VBLANK_INTERRUPT_ENABLE |
1744 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1745 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1746}
1747
f71d4af4 1748static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
f796cf8f
JB
1749{
1750 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1751 unsigned long irqflags;
1752
1753 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1754 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
0206e353 1755 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1ec14ad3 1756 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
0a3e67a4
JB
1757}
1758
f71d4af4 1759static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
b1f14ad0
JB
1760{
1761 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1762 unsigned long irqflags;
1763
1764 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
b615b57a
CW
1765 ironlake_disable_display_irq(dev_priv,
1766 DE_PIPEA_VBLANK_IVB << (pipe * 5));
b1f14ad0
JB
1767 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1768}
1769
7e231dbe
JB
1770static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1771{
1772 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1773 unsigned long irqflags;
31acc7f5 1774 u32 imr;
7e231dbe
JB
1775
1776 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
31acc7f5
JB
1777 i915_disable_pipestat(dev_priv, pipe,
1778 PIPE_START_VBLANK_INTERRUPT_ENABLE);
7e231dbe 1779 imr = I915_READ(VLV_IMR);
31acc7f5 1780 if (pipe == 0)
7e231dbe 1781 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
31acc7f5 1782 else
7e231dbe 1783 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
7e231dbe 1784 I915_WRITE(VLV_IMR, imr);
7e231dbe
JB
1785 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1786}
1787
893eead0
CW
1788static u32
1789ring_last_seqno(struct intel_ring_buffer *ring)
852835f3 1790{
893eead0
CW
1791 return list_entry(ring->request_list.prev,
1792 struct drm_i915_gem_request, list)->seqno;
1793}
1794
1795static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1796{
1797 if (list_empty(&ring->request_list) ||
b2eadbc8
CW
1798 i915_seqno_passed(ring->get_seqno(ring, false),
1799 ring_last_seqno(ring))) {
893eead0 1800 /* Issue a wake-up to catch stuck h/w. */
9574b3fe
BW
1801 if (waitqueue_active(&ring->irq_queue)) {
1802 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1803 ring->name);
893eead0
CW
1804 wake_up_all(&ring->irq_queue);
1805 *err = true;
1806 }
1807 return true;
1808 }
1809 return false;
f65d9421
BG
1810}
1811
a24a11e6
CW
1812static bool semaphore_passed(struct intel_ring_buffer *ring)
1813{
1814 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1815 u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
1816 struct intel_ring_buffer *signaller;
1817 u32 cmd, ipehr, acthd_min;
1818
1819 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
1820 if ((ipehr & ~(0x3 << 16)) !=
1821 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
1822 return false;
1823
1824 /* ACTHD is likely pointing to the dword after the actual command,
1825 * so scan backwards until we find the MBOX.
1826 */
1827 acthd_min = max((int)acthd - 3 * 4, 0);
1828 do {
1829 cmd = ioread32(ring->virtual_start + acthd);
1830 if (cmd == ipehr)
1831 break;
1832
1833 acthd -= 4;
1834 if (acthd < acthd_min)
1835 return false;
1836 } while (1);
1837
1838 signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
1839 return i915_seqno_passed(signaller->get_seqno(signaller, false),
1840 ioread32(ring->virtual_start+acthd+4)+1);
1841}
1842
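/*
 * semaphore_passed() decides whether a ring stuck in a semaphore wait is
 * only spuriously stuck: it locates the MI_SEMAPHORE_MBOX command reported
 * by IPEHR (scanning back from ACTHD), works out which ring should have
 * signalled it, and returns true if that ring's current seqno has already
 * passed the awaited value - meaning the waiter can safely be kicked.
 */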
1ec14ad3
CW
1843static bool kick_ring(struct intel_ring_buffer *ring)
1844{
1845 struct drm_device *dev = ring->dev;
1846 struct drm_i915_private *dev_priv = dev->dev_private;
1847 u32 tmp = I915_READ_CTL(ring);
1848 if (tmp & RING_WAIT) {
1849 DRM_ERROR("Kicking stuck wait on %s\n",
1850 ring->name);
1851 I915_WRITE_CTL(ring, tmp);
1852 return true;
1853 }
a24a11e6
CW
1854
1855 if (INTEL_INFO(dev)->gen >= 6 &&
1856 tmp & RING_WAIT_SEMAPHORE &&
1857 semaphore_passed(ring)) {
1858 DRM_ERROR("Kicking stuck semaphore on %s\n",
1859 ring->name);
1860 I915_WRITE_CTL(ring, tmp);
1861 return true;
1862 }
1ec14ad3
CW
1863 return false;
1864}
1865
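/*
 * The kick itself is deliberately minimal: rewriting the ring CTL register
 * with its current value appears to be enough to nudge the hardware out of
 * a stuck MI_WAIT, and on gen6+ the same write is used for semaphore waits
 * once semaphore_passed() confirms the awaited condition has been met.
 */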
d1e61e7f
CW
1866static bool i915_hangcheck_hung(struct drm_device *dev)
1867{
1868 drm_i915_private_t *dev_priv = dev->dev_private;
1869
99584db3 1870 if (dev_priv->gpu_error.hangcheck_count++ > 1) {
b4519513
CW
1871 bool hung = true;
1872
d1e61e7f
CW
1873 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1874 i915_handle_error(dev, true);
1875
1876 if (!IS_GEN2(dev)) {
b4519513
CW
1877 struct intel_ring_buffer *ring;
1878 int i;
1879
d1e61e7f
CW
1880 /* Is the chip hanging on a WAIT_FOR_EVENT?
1881 * If so we can simply poke the RB_WAIT bit
1882 * and break the hang. This should work on
1883 * all but the second generation chipsets.
1884 */
b4519513
CW
1885 for_each_ring(ring, dev_priv, i)
1886 hung &= !kick_ring(ring);
d1e61e7f
CW
1887 }
1888
b4519513 1889 return hung;
d1e61e7f
CW
1890 }
1891
1892 return false;
1893}
1894
f65d9421
BG
1895/**
1896 * This is called when the chip hasn't reported back with completed
1897 * batchbuffers in a long time. The first time this is called we simply record
1898 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
1899 * again, we assume the chip is wedged and try to fix it.
1900 */
1901void i915_hangcheck_elapsed(unsigned long data)
1902{
1903 struct drm_device *dev = (struct drm_device *)data;
1904 drm_i915_private_t *dev_priv = dev->dev_private;
bd9854f9 1905 uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
b4519513
CW
1906 struct intel_ring_buffer *ring;
1907 bool err = false, idle;
1908 int i;
893eead0 1909
3e0dc6b0
BW
1910 if (!i915_enable_hangcheck)
1911 return;
1912
b4519513
CW
1913 memset(acthd, 0, sizeof(acthd));
1914 idle = true;
1915 for_each_ring(ring, dev_priv, i) {
1916 idle &= i915_hangcheck_ring_idle(ring, &err);
1917 acthd[i] = intel_ring_get_active_head(ring);
1918 }
1919
893eead0 1920 /* If all work is done then ACTHD clearly hasn't advanced. */
b4519513 1921 if (idle) {
d1e61e7f
CW
1922 if (err) {
1923 if (i915_hangcheck_hung(dev))
1924 return;
1925
893eead0 1926 goto repeat;
d1e61e7f
CW
1927 }
1928
99584db3 1929 dev_priv->gpu_error.hangcheck_count = 0;
893eead0
CW
1930 return;
1931 }
b9201c14 1932
bd9854f9 1933 i915_get_extra_instdone(dev, instdone);
99584db3
DV
1934 if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
1935 sizeof(acthd)) == 0 &&
1936 memcmp(dev_priv->gpu_error.prev_instdone, instdone,
1937 sizeof(instdone)) == 0) {
d1e61e7f 1938 if (i915_hangcheck_hung(dev))
cbb465e7 1939 return;
cbb465e7 1940 } else {
99584db3 1941 dev_priv->gpu_error.hangcheck_count = 0;
cbb465e7 1942
99584db3
DV
1943 memcpy(dev_priv->gpu_error.last_acthd, acthd,
1944 sizeof(acthd));
1945 memcpy(dev_priv->gpu_error.prev_instdone, instdone,
1946 sizeof(instdone));
cbb465e7 1947 }
f65d9421 1948
893eead0 1949repeat:
f65d9421 1950 /* Reset timer in case chip hangs without another request being added */
99584db3 1951 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
cecc21fe 1952 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
f65d9421
BG
1953}
1954
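/*
 * Putting the pieces together, each hangcheck tick roughly does this: if
 * every ring is idle the error counter is cleared (after waking any stuck
 * waiters); otherwise ACTHD and the extended INSTDONE registers are sampled
 * and compared against the previous tick. Only when they are identical two
 * ticks in a row does i915_hangcheck_hung() fire, which reports the hang via
 * i915_handle_error(dev, true) and then tries to kick rings that are merely
 * stuck on a WAIT event. The timer is re-armed unless a real hang was declared.
 */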
1da177e4
LT
1955/* drm_dma.h hooks
1956*/
f71d4af4 1957static void ironlake_irq_preinstall(struct drm_device *dev)
036a4a7d
ZW
1958{
1959 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1960
4697995b
JB
1961 atomic_set(&dev_priv->irq_received, 0);
1962
036a4a7d 1963 I915_WRITE(HWSTAM, 0xeffe);
bdfcdb63 1964
036a4a7d
ZW
1965 /* XXX hotplug from PCH */
1966
1967 I915_WRITE(DEIMR, 0xffffffff);
1968 I915_WRITE(DEIER, 0x0);
3143a2bf 1969 POSTING_READ(DEIER);
036a4a7d
ZW
1970
1971 /* and GT */
1972 I915_WRITE(GTIMR, 0xffffffff);
1973 I915_WRITE(GTIER, 0x0);
3143a2bf 1974 POSTING_READ(GTIER);
c650156a
ZW
1975
1976 /* south display irq */
1977 I915_WRITE(SDEIMR, 0xffffffff);
1978 I915_WRITE(SDEIER, 0x0);
3143a2bf 1979 POSTING_READ(SDEIER);
036a4a7d
ZW
1980}
1981
7e231dbe
JB
1982static void valleyview_irq_preinstall(struct drm_device *dev)
1983{
1984 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1985 int pipe;
1986
1987 atomic_set(&dev_priv->irq_received, 0);
1988
7e231dbe
JB
1989 /* VLV magic */
1990 I915_WRITE(VLV_IMR, 0);
1991 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
1992 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
1993 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
1994
7e231dbe
JB
1995 /* and GT */
1996 I915_WRITE(GTIIR, I915_READ(GTIIR));
1997 I915_WRITE(GTIIR, I915_READ(GTIIR));
1998 I915_WRITE(GTIMR, 0xffffffff);
1999 I915_WRITE(GTIER, 0x0);
2000 POSTING_READ(GTIER);
2001
2002 I915_WRITE(DPINVGTT, 0xff);
2003
2004 I915_WRITE(PORT_HOTPLUG_EN, 0);
2005 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2006 for_each_pipe(pipe)
2007 I915_WRITE(PIPESTAT(pipe), 0xffff);
2008 I915_WRITE(VLV_IIR, 0xffffffff);
2009 I915_WRITE(VLV_IMR, 0xffffffff);
2010 I915_WRITE(VLV_IER, 0x0);
2011 POSTING_READ(VLV_IER);
2012}
2013
7fe0b973
KP
2014/*
2015 * Enable digital hotplug on the PCH, and configure the DP short pulse
2016 * duration to 2ms (which is the minimum in the Display Port spec)
2017 *
2018 * This register is the same on all known PCH chips.
2019 */
2020
d46da437 2021static void ibx_enable_hotplug(struct drm_device *dev)
7fe0b973
KP
2022{
2023 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2024 u32 hotplug;
2025
2026 hotplug = I915_READ(PCH_PORT_HOTPLUG);
2027 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2028 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2029 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2030 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2031 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2032}
2033
d46da437
PZ
2034static void ibx_irq_postinstall(struct drm_device *dev)
2035{
2036 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2037 u32 mask;
2038
2039 if (HAS_PCH_IBX(dev))
2040 mask = SDE_HOTPLUG_MASK |
2041 SDE_GMBUS |
2042 SDE_AUX_MASK;
2043 else
2044 mask = SDE_HOTPLUG_MASK_CPT |
2045 SDE_GMBUS_CPT |
2046 SDE_AUX_MASK_CPT;
2047
2048 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2049 I915_WRITE(SDEIMR, ~mask);
2050 I915_WRITE(SDEIER, mask);
2051 POSTING_READ(SDEIER);
2052
2053 ibx_enable_hotplug(dev);
2054}
2055
f71d4af4 2056static int ironlake_irq_postinstall(struct drm_device *dev)
036a4a7d
ZW
2057{
2058 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 2059 /* enable the kinds of interrupts that are always enabled */
013d5aa2 2060 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
ce99c256
DV
2061 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2062 DE_AUX_CHANNEL_A;
1ec14ad3 2063 u32 render_irqs;
036a4a7d 2064
1ec14ad3 2065 dev_priv->irq_mask = ~display_mask;
036a4a7d
ZW
2066
 2067 /* should always be able to generate an irq */
2068 I915_WRITE(DEIIR, I915_READ(DEIIR));
1ec14ad3
CW
2069 I915_WRITE(DEIMR, dev_priv->irq_mask);
2070 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
3143a2bf 2071 POSTING_READ(DEIER);
036a4a7d 2072
1ec14ad3 2073 dev_priv->gt_irq_mask = ~0;
036a4a7d
ZW
2074
2075 I915_WRITE(GTIIR, I915_READ(GTIIR));
1ec14ad3 2076 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
881f47b6 2077
1ec14ad3
CW
2078 if (IS_GEN6(dev))
2079 render_irqs =
2080 GT_USER_INTERRUPT |
e2a1e2f0
BW
2081 GEN6_BSD_USER_INTERRUPT |
2082 GEN6_BLITTER_USER_INTERRUPT;
1ec14ad3
CW
2083 else
2084 render_irqs =
88f23b8f 2085 GT_USER_INTERRUPT |
c6df541c 2086 GT_PIPE_NOTIFY |
1ec14ad3
CW
2087 GT_BSD_USER_INTERRUPT;
2088 I915_WRITE(GTIER, render_irqs);
3143a2bf 2089 POSTING_READ(GTIER);
036a4a7d 2090
d46da437 2091 ibx_irq_postinstall(dev);
7fe0b973 2092
f97108d1
JB
2093 if (IS_IRONLAKE_M(dev)) {
2094 /* Clear & enable PCU event interrupts */
2095 I915_WRITE(DEIIR, DE_PCU_EVENT);
2096 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
2097 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2098 }
2099
036a4a7d
ZW
2100 return 0;
2101}
2102
f71d4af4 2103static int ivybridge_irq_postinstall(struct drm_device *dev)
b1f14ad0
JB
2104{
2105 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 2106 /* enable the kinds of interrupts that are always enabled */
b615b57a
CW
2107 u32 display_mask =
2108 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
2109 DE_PLANEC_FLIP_DONE_IVB |
2110 DE_PLANEB_FLIP_DONE_IVB |
ce99c256
DV
2111 DE_PLANEA_FLIP_DONE_IVB |
2112 DE_AUX_CHANNEL_A_IVB;
b1f14ad0 2113 u32 render_irqs;
b1f14ad0 2114
b1f14ad0
JB
2115 dev_priv->irq_mask = ~display_mask;
2116
 2117 /* should always be able to generate an irq */
2118 I915_WRITE(DEIIR, I915_READ(DEIIR));
2119 I915_WRITE(DEIMR, dev_priv->irq_mask);
b615b57a
CW
2120 I915_WRITE(DEIER,
2121 display_mask |
2122 DE_PIPEC_VBLANK_IVB |
2123 DE_PIPEB_VBLANK_IVB |
2124 DE_PIPEA_VBLANK_IVB);
b1f14ad0
JB
2125 POSTING_READ(DEIER);
2126
15b9f80e 2127 dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
b1f14ad0
JB
2128
2129 I915_WRITE(GTIIR, I915_READ(GTIIR));
2130 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2131
e2a1e2f0 2132 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
15b9f80e 2133 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
b1f14ad0
JB
2134 I915_WRITE(GTIER, render_irqs);
2135 POSTING_READ(GTIER);
2136
d46da437 2137 ibx_irq_postinstall(dev);
7fe0b973 2138
b1f14ad0
JB
2139 return 0;
2140}
2141
7e231dbe
JB
2142static int valleyview_irq_postinstall(struct drm_device *dev)
2143{
2144 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
7e231dbe 2145 u32 enable_mask;
31acc7f5 2146 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
3bcedbe5 2147 u32 render_irqs;
7e231dbe
JB
2148 u16 msid;
2149
2150 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
31acc7f5
JB
2151 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2152 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2153 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
7e231dbe
JB
2154 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2155
31acc7f5
JB
2156 /*
 2157 * Leave vblank interrupts masked initially; enable/disable will
2158 * toggle them based on usage.
2159 */
2160 dev_priv->irq_mask = (~enable_mask) |
2161 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2162 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
7e231dbe 2163
7e231dbe
JB
2164 /* Hack for broken MSIs on VLV */
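	/*
	 * The exact programming below is undocumented here; the dword write to
	 * config offset 0x94 presumably re-points the device's MSI address at
	 * the standard 0xFEE00000 APIC window, and the word at 0x98 looks like
	 * the matching MSI data register with the delivery bits masked off and
	 * bit 14 set. Treat that interpretation as an assumption, not as
	 * chipset documentation.
	 */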
2165 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
2166 pci_read_config_word(dev->pdev, 0x98, &msid);
2167 msid &= 0xff; /* mask out delivery bits */
2168 msid |= (1<<14);
2169 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
2170
20afbda2
DV
2171 I915_WRITE(PORT_HOTPLUG_EN, 0);
2172 POSTING_READ(PORT_HOTPLUG_EN);
2173
7e231dbe
JB
2174 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2175 I915_WRITE(VLV_IER, enable_mask);
2176 I915_WRITE(VLV_IIR, 0xffffffff);
2177 I915_WRITE(PIPESTAT(0), 0xffff);
2178 I915_WRITE(PIPESTAT(1), 0xffff);
2179 POSTING_READ(VLV_IER);
2180
31acc7f5 2181 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
515ac2bb 2182 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
31acc7f5
JB
2183 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2184
7e231dbe
JB
2185 I915_WRITE(VLV_IIR, 0xffffffff);
2186 I915_WRITE(VLV_IIR, 0xffffffff);
2187
7e231dbe 2188 I915_WRITE(GTIIR, I915_READ(GTIIR));
31acc7f5 2189 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
3bcedbe5
JB
2190
2191 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2192 GEN6_BLITTER_USER_INTERRUPT;
2193 I915_WRITE(GTIER, render_irqs);
7e231dbe
JB
2194 POSTING_READ(GTIER);
2195
2196 /* ack & enable invalid PTE error interrupts */
2197#if 0 /* FIXME: add support to irq handler for checking these bits */
2198 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2199 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2200#endif
2201
2202 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
20afbda2
DV
2203
2204 return 0;
2205}
2206
7e231dbe
JB
2207static void valleyview_irq_uninstall(struct drm_device *dev)
2208{
2209 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2210 int pipe;
2211
2212 if (!dev_priv)
2213 return;
2214
7e231dbe
JB
2215 for_each_pipe(pipe)
2216 I915_WRITE(PIPESTAT(pipe), 0xffff);
2217
2218 I915_WRITE(HWSTAM, 0xffffffff);
2219 I915_WRITE(PORT_HOTPLUG_EN, 0);
2220 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2221 for_each_pipe(pipe)
2222 I915_WRITE(PIPESTAT(pipe), 0xffff);
2223 I915_WRITE(VLV_IIR, 0xffffffff);
2224 I915_WRITE(VLV_IMR, 0xffffffff);
2225 I915_WRITE(VLV_IER, 0x0);
2226 POSTING_READ(VLV_IER);
2227}
2228
f71d4af4 2229static void ironlake_irq_uninstall(struct drm_device *dev)
036a4a7d
ZW
2230{
2231 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
4697995b
JB
2232
2233 if (!dev_priv)
2234 return;
2235
036a4a7d
ZW
2236 I915_WRITE(HWSTAM, 0xffffffff);
2237
2238 I915_WRITE(DEIMR, 0xffffffff);
2239 I915_WRITE(DEIER, 0x0);
2240 I915_WRITE(DEIIR, I915_READ(DEIIR));
2241
2242 I915_WRITE(GTIMR, 0xffffffff);
2243 I915_WRITE(GTIER, 0x0);
2244 I915_WRITE(GTIIR, I915_READ(GTIIR));
192aac1f
KP
2245
2246 I915_WRITE(SDEIMR, 0xffffffff);
2247 I915_WRITE(SDEIER, 0x0);
2248 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
036a4a7d
ZW
2249}
2250
a266c7d5 2251static void i8xx_irq_preinstall(struct drm_device * dev)
1da177e4
LT
2252{
2253 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
9db4a9c7 2254 int pipe;
91e3738e 2255
a266c7d5 2256 atomic_set(&dev_priv->irq_received, 0);
5ca58282 2257
9db4a9c7
JB
2258 for_each_pipe(pipe)
2259 I915_WRITE(PIPESTAT(pipe), 0);
a266c7d5
CW
2260 I915_WRITE16(IMR, 0xffff);
2261 I915_WRITE16(IER, 0x0);
2262 POSTING_READ16(IER);
c2798b19
CW
2263}
2264
2265static int i8xx_irq_postinstall(struct drm_device *dev)
2266{
2267 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2268
c2798b19
CW
2269 I915_WRITE16(EMR,
2270 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2271
2272 /* Unmask the interrupts that we always want on. */
2273 dev_priv->irq_mask =
2274 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2275 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2276 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2277 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2278 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2279 I915_WRITE16(IMR, dev_priv->irq_mask);
2280
2281 I915_WRITE16(IER,
2282 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2283 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2284 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2285 I915_USER_INTERRUPT);
2286 POSTING_READ16(IER);
2287
2288 return 0;
2289}
2290
90a72f87
VS
2291/*
2292 * Returns true when a page flip has completed.
2293 */
2294static bool i8xx_handle_vblank(struct drm_device *dev,
2295 int pipe, u16 iir)
2296{
2297 drm_i915_private_t *dev_priv = dev->dev_private;
2298 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2299
2300 if (!drm_handle_vblank(dev, pipe))
2301 return false;
2302
2303 if ((iir & flip_pending) == 0)
2304 return false;
2305
2306 intel_prepare_page_flip(dev, pipe);
2307
2308 /* We detect FlipDone by looking for the change in PendingFlip from '1'
2309 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2310 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2311 * the flip is completed (no longer pending). Since this doesn't raise
2312 * an interrupt per se, we watch for the change at vblank.
2313 */
2314 if (I915_READ16(ISR) & flip_pending)
2315 return false;
2316
2317 intel_finish_page_flip(dev, pipe);
2318
2319 return true;
2320}
2321
ff1f525e 2322static irqreturn_t i8xx_irq_handler(int irq, void *arg)
c2798b19
CW
2323{
2324 struct drm_device *dev = (struct drm_device *) arg;
2325 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
c2798b19
CW
2326 u16 iir, new_iir;
2327 u32 pipe_stats[2];
2328 unsigned long irqflags;
2329 int irq_received;
2330 int pipe;
2331 u16 flip_mask =
2332 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2333 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2334
2335 atomic_inc(&dev_priv->irq_received);
2336
2337 iir = I915_READ16(IIR);
2338 if (iir == 0)
2339 return IRQ_NONE;
2340
2341 while (iir & ~flip_mask) {
2342 /* Can't rely on pipestat interrupt bit in iir as it might
2343 * have been cleared after the pipestat interrupt was received.
2344 * It doesn't set the bit in iir again, but it still produces
2345 * interrupts (for non-MSI).
2346 */
2347 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2348 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2349 i915_handle_error(dev, false);
2350
2351 for_each_pipe(pipe) {
2352 int reg = PIPESTAT(pipe);
2353 pipe_stats[pipe] = I915_READ(reg);
2354
2355 /*
2356 * Clear the PIPE*STAT regs before the IIR
2357 */
2358 if (pipe_stats[pipe] & 0x8000ffff) {
2359 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2360 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2361 pipe_name(pipe));
2362 I915_WRITE(reg, pipe_stats[pipe]);
2363 irq_received = 1;
2364 }
2365 }
2366 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2367
2368 I915_WRITE16(IIR, iir & ~flip_mask);
2369 new_iir = I915_READ16(IIR); /* Flush posted writes */
2370
d05c617e 2371 i915_update_dri1_breadcrumb(dev);
c2798b19
CW
2372
2373 if (iir & I915_USER_INTERRUPT)
2374 notify_ring(dev, &dev_priv->ring[RCS]);
2375
2376 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
90a72f87
VS
2377 i8xx_handle_vblank(dev, 0, iir))
2378 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
c2798b19
CW
2379
2380 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
90a72f87
VS
2381 i8xx_handle_vblank(dev, 1, iir))
2382 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
c2798b19
CW
2383
2384 iir = new_iir;
2385 }
2386
2387 return IRQ_HANDLED;
2388}
2389
2390static void i8xx_irq_uninstall(struct drm_device * dev)
2391{
2392 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2393 int pipe;
2394
c2798b19
CW
2395 for_each_pipe(pipe) {
2396 /* Clear enable bits; then clear status bits */
2397 I915_WRITE(PIPESTAT(pipe), 0);
2398 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2399 }
2400 I915_WRITE16(IMR, 0xffff);
2401 I915_WRITE16(IER, 0x0);
2402 I915_WRITE16(IIR, I915_READ16(IIR));
2403}
2404
a266c7d5
CW
2405static void i915_irq_preinstall(struct drm_device * dev)
2406{
2407 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2408 int pipe;
2409
2410 atomic_set(&dev_priv->irq_received, 0);
2411
2412 if (I915_HAS_HOTPLUG(dev)) {
2413 I915_WRITE(PORT_HOTPLUG_EN, 0);
2414 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2415 }
2416
00d98ebd 2417 I915_WRITE16(HWSTAM, 0xeffe);
a266c7d5
CW
2418 for_each_pipe(pipe)
2419 I915_WRITE(PIPESTAT(pipe), 0);
2420 I915_WRITE(IMR, 0xffffffff);
2421 I915_WRITE(IER, 0x0);
2422 POSTING_READ(IER);
2423}
2424
2425static int i915_irq_postinstall(struct drm_device *dev)
2426{
2427 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38bde180 2428 u32 enable_mask;
a266c7d5 2429
38bde180
CW
2430 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2431
2432 /* Unmask the interrupts that we always want on. */
2433 dev_priv->irq_mask =
2434 ~(I915_ASLE_INTERRUPT |
2435 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2436 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2437 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2438 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2439 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2440
2441 enable_mask =
2442 I915_ASLE_INTERRUPT |
2443 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2444 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2445 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2446 I915_USER_INTERRUPT;
2447
a266c7d5 2448 if (I915_HAS_HOTPLUG(dev)) {
20afbda2
DV
2449 I915_WRITE(PORT_HOTPLUG_EN, 0);
2450 POSTING_READ(PORT_HOTPLUG_EN);
2451
a266c7d5
CW
2452 /* Enable in IER... */
2453 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2454 /* and unmask in IMR */
2455 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2456 }
2457
a266c7d5
CW
2458 I915_WRITE(IMR, dev_priv->irq_mask);
2459 I915_WRITE(IER, enable_mask);
2460 POSTING_READ(IER);
2461
20afbda2
DV
2462 intel_opregion_enable_asle(dev);
2463
2464 return 0;
2465}
2466
2467static void i915_hpd_irq_setup(struct drm_device *dev)
2468{
2469 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2470 u32 hotplug_en;
2471
a266c7d5 2472 if (I915_HAS_HOTPLUG(dev)) {
20afbda2 2473 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
a266c7d5 2474
26739f12
DV
2475 if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
2476 hotplug_en |= PORTB_HOTPLUG_INT_EN;
2477 if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
2478 hotplug_en |= PORTC_HOTPLUG_INT_EN;
2479 if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
2480 hotplug_en |= PORTD_HOTPLUG_INT_EN;
084b612e 2481 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
a266c7d5 2482 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
084b612e 2483 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
a266c7d5
CW
2484 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2485 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2486 hotplug_en |= CRT_HOTPLUG_INT_EN;
a266c7d5
CW
2487 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2488 }
2489
2490 /* Ignore TV since it's buggy */
2491
2492 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2493 }
a266c7d5
CW
2494}
2495
90a72f87
VS
2496/*
2497 * Returns true when a page flip has completed.
2498 */
2499static bool i915_handle_vblank(struct drm_device *dev,
2500 int plane, int pipe, u32 iir)
2501{
2502 drm_i915_private_t *dev_priv = dev->dev_private;
2503 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
2504
2505 if (!drm_handle_vblank(dev, pipe))
2506 return false;
2507
2508 if ((iir & flip_pending) == 0)
2509 return false;
2510
2511 intel_prepare_page_flip(dev, plane);
2512
2513 /* We detect FlipDone by looking for the change in PendingFlip from '1'
2514 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2515 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2516 * the flip is completed (no longer pending). Since this doesn't raise
2517 * an interrupt per se, we watch for the change at vblank.
2518 */
2519 if (I915_READ(ISR) & flip_pending)
2520 return false;
2521
2522 intel_finish_page_flip(dev, pipe);
2523
2524 return true;
2525}
2526
ff1f525e 2527static irqreturn_t i915_irq_handler(int irq, void *arg)
a266c7d5
CW
2528{
2529 struct drm_device *dev = (struct drm_device *) arg;
2530 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
8291ee90 2531 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
a266c7d5 2532 unsigned long irqflags;
38bde180
CW
2533 u32 flip_mask =
2534 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2535 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
38bde180 2536 int pipe, ret = IRQ_NONE;
a266c7d5
CW
2537
2538 atomic_inc(&dev_priv->irq_received);
2539
2540 iir = I915_READ(IIR);
38bde180
CW
2541 do {
2542 bool irq_received = (iir & ~flip_mask) != 0;
8291ee90 2543 bool blc_event = false;
a266c7d5
CW
2544
2545 /* Can't rely on pipestat interrupt bit in iir as it might
2546 * have been cleared after the pipestat interrupt was received.
2547 * It doesn't set the bit in iir again, but it still produces
2548 * interrupts (for non-MSI).
2549 */
2550 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2551 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2552 i915_handle_error(dev, false);
2553
2554 for_each_pipe(pipe) {
2555 int reg = PIPESTAT(pipe);
2556 pipe_stats[pipe] = I915_READ(reg);
2557
38bde180 2558 /* Clear the PIPE*STAT regs before the IIR */
a266c7d5
CW
2559 if (pipe_stats[pipe] & 0x8000ffff) {
2560 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2561 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2562 pipe_name(pipe));
2563 I915_WRITE(reg, pipe_stats[pipe]);
38bde180 2564 irq_received = true;
a266c7d5
CW
2565 }
2566 }
2567 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2568
2569 if (!irq_received)
2570 break;
2571
a266c7d5
CW
2572 /* Consume port. Then clear IIR or we'll miss events */
2573 if ((I915_HAS_HOTPLUG(dev)) &&
2574 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2575 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2576
2577 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2578 hotplug_status);
2579 if (hotplug_status & dev_priv->hotplug_supported_mask)
2580 queue_work(dev_priv->wq,
2581 &dev_priv->hotplug_work);
2582
2583 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
38bde180 2584 POSTING_READ(PORT_HOTPLUG_STAT);
a266c7d5
CW
2585 }
2586
38bde180 2587 I915_WRITE(IIR, iir & ~flip_mask);
a266c7d5
CW
2588 new_iir = I915_READ(IIR); /* Flush posted writes */
2589
a266c7d5
CW
2590 if (iir & I915_USER_INTERRUPT)
2591 notify_ring(dev, &dev_priv->ring[RCS]);
a266c7d5 2592
a266c7d5 2593 for_each_pipe(pipe) {
38bde180
CW
2594 int plane = pipe;
2595 if (IS_MOBILE(dev))
2596 plane = !plane;
90a72f87 2597
8291ee90 2598 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
90a72f87
VS
2599 i915_handle_vblank(dev, plane, pipe, iir))
2600 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
a266c7d5
CW
2601
2602 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2603 blc_event = true;
2604 }
2605
a266c7d5
CW
2606 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2607 intel_opregion_asle_intr(dev);
2608
2609 /* With MSI, interrupts are only generated when iir
2610 * transitions from zero to nonzero. If another bit got
2611 * set while we were handling the existing iir bits, then
2612 * we would never get another interrupt.
2613 *
2614 * This is fine on non-MSI as well, as if we hit this path
2615 * we avoid exiting the interrupt handler only to generate
2616 * another one.
2617 *
2618 * Note that for MSI this could cause a stray interrupt report
2619 * if an interrupt landed in the time between writing IIR and
2620 * the posting read. This should be rare enough to never
2621 * trigger the 99% of 100,000 interrupts test for disabling
2622 * stray interrupts.
2623 */
38bde180 2624 ret = IRQ_HANDLED;
a266c7d5 2625 iir = new_iir;
38bde180 2626 } while (iir & ~flip_mask);
a266c7d5 2627
d05c617e 2628 i915_update_dri1_breadcrumb(dev);
8291ee90 2629
a266c7d5
CW
2630 return ret;
2631}
2632
2633static void i915_irq_uninstall(struct drm_device * dev)
2634{
2635 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2636 int pipe;
2637
a266c7d5
CW
2638 if (I915_HAS_HOTPLUG(dev)) {
2639 I915_WRITE(PORT_HOTPLUG_EN, 0);
2640 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2641 }
2642
00d98ebd 2643 I915_WRITE16(HWSTAM, 0xffff);
55b39755
CW
2644 for_each_pipe(pipe) {
2645 /* Clear enable bits; then clear status bits */
a266c7d5 2646 I915_WRITE(PIPESTAT(pipe), 0);
55b39755
CW
2647 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2648 }
a266c7d5
CW
2649 I915_WRITE(IMR, 0xffffffff);
2650 I915_WRITE(IER, 0x0);
2651
a266c7d5
CW
2652 I915_WRITE(IIR, I915_READ(IIR));
2653}
2654
2655static void i965_irq_preinstall(struct drm_device * dev)
2656{
2657 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2658 int pipe;
2659
2660 atomic_set(&dev_priv->irq_received, 0);
2661
adca4730
CW
2662 I915_WRITE(PORT_HOTPLUG_EN, 0);
2663 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
a266c7d5
CW
2664
2665 I915_WRITE(HWSTAM, 0xeffe);
2666 for_each_pipe(pipe)
2667 I915_WRITE(PIPESTAT(pipe), 0);
2668 I915_WRITE(IMR, 0xffffffff);
2669 I915_WRITE(IER, 0x0);
2670 POSTING_READ(IER);
2671}
2672
2673static int i965_irq_postinstall(struct drm_device *dev)
2674{
2675 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
bbba0a97 2676 u32 enable_mask;
a266c7d5
CW
2677 u32 error_mask;
2678
a266c7d5 2679 /* Unmask the interrupts that we always want on. */
bbba0a97 2680 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
adca4730 2681 I915_DISPLAY_PORT_INTERRUPT |
bbba0a97
CW
2682 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2683 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2684 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2685 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2686 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2687
2688 enable_mask = ~dev_priv->irq_mask;
21ad8330
VS
2689 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2690 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
bbba0a97
CW
2691 enable_mask |= I915_USER_INTERRUPT;
2692
2693 if (IS_G4X(dev))
2694 enable_mask |= I915_BSD_USER_INTERRUPT;
a266c7d5 2695
515ac2bb 2696 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
a266c7d5 2697
a266c7d5
CW
2698 /*
2699 * Enable some error detection, note the instruction error mask
2700 * bit is reserved, so we leave it masked.
2701 */
2702 if (IS_G4X(dev)) {
2703 error_mask = ~(GM45_ERROR_PAGE_TABLE |
2704 GM45_ERROR_MEM_PRIV |
2705 GM45_ERROR_CP_PRIV |
2706 I915_ERROR_MEMORY_REFRESH);
2707 } else {
2708 error_mask = ~(I915_ERROR_PAGE_TABLE |
2709 I915_ERROR_MEMORY_REFRESH);
2710 }
2711 I915_WRITE(EMR, error_mask);
2712
2713 I915_WRITE(IMR, dev_priv->irq_mask);
2714 I915_WRITE(IER, enable_mask);
2715 POSTING_READ(IER);
2716
20afbda2
DV
2717 I915_WRITE(PORT_HOTPLUG_EN, 0);
2718 POSTING_READ(PORT_HOTPLUG_EN);
2719
2720 intel_opregion_enable_asle(dev);
2721
2722 return 0;
2723}
2724
2725static void i965_hpd_irq_setup(struct drm_device *dev)
2726{
2727 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2728 u32 hotplug_en;
2729
adca4730
CW
2730 /* Note HDMI and DP share hotplug bits */
2731 hotplug_en = 0;
26739f12
DV
2732 if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
2733 hotplug_en |= PORTB_HOTPLUG_INT_EN;
2734 if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
2735 hotplug_en |= PORTC_HOTPLUG_INT_EN;
2736 if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
2737 hotplug_en |= PORTD_HOTPLUG_INT_EN;
084b612e
CW
2738 if (IS_G4X(dev)) {
2739 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
2740 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2741 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
2742 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2743 } else {
2744 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
2745 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2746 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
2747 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2748 }
adca4730
CW
2749 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2750 hotplug_en |= CRT_HOTPLUG_INT_EN;
a266c7d5 2751
adca4730
CW
2752 /* Programming the CRT detection parameters tends
2753 to generate a spurious hotplug event about three
2754 seconds later. So just do it once.
2755 */
2756 if (IS_G4X(dev))
2757 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2758 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2759 }
a266c7d5 2760
adca4730 2761 /* Ignore TV since it's buggy */
a266c7d5 2762
adca4730 2763 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
a266c7d5
CW
2764}
2765
ff1f525e 2766static irqreturn_t i965_irq_handler(int irq, void *arg)
a266c7d5
CW
2767{
2768 struct drm_device *dev = (struct drm_device *) arg;
2769 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
a266c7d5
CW
2770 u32 iir, new_iir;
2771 u32 pipe_stats[I915_MAX_PIPES];
a266c7d5
CW
2772 unsigned long irqflags;
2773 int irq_received;
2774 int ret = IRQ_NONE, pipe;
21ad8330
VS
2775 u32 flip_mask =
2776 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2777 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
a266c7d5
CW
2778
2779 atomic_inc(&dev_priv->irq_received);
2780
2781 iir = I915_READ(IIR);
2782
a266c7d5 2783 for (;;) {
2c8ba29f
CW
2784 bool blc_event = false;
2785
21ad8330 2786 irq_received = (iir & ~flip_mask) != 0;
a266c7d5
CW
2787
2788 /* Can't rely on pipestat interrupt bit in iir as it might
2789 * have been cleared after the pipestat interrupt was received.
2790 * It doesn't set the bit in iir again, but it still produces
2791 * interrupts (for non-MSI).
2792 */
2793 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2794 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2795 i915_handle_error(dev, false);
2796
2797 for_each_pipe(pipe) {
2798 int reg = PIPESTAT(pipe);
2799 pipe_stats[pipe] = I915_READ(reg);
2800
2801 /*
2802 * Clear the PIPE*STAT regs before the IIR
2803 */
2804 if (pipe_stats[pipe] & 0x8000ffff) {
2805 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2806 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2807 pipe_name(pipe));
2808 I915_WRITE(reg, pipe_stats[pipe]);
2809 irq_received = 1;
2810 }
2811 }
2812 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2813
2814 if (!irq_received)
2815 break;
2816
2817 ret = IRQ_HANDLED;
2818
2819 /* Consume port. Then clear IIR or we'll miss events */
adca4730 2820 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
a266c7d5
CW
2821 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2822
2823 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2824 hotplug_status);
2825 if (hotplug_status & dev_priv->hotplug_supported_mask)
2826 queue_work(dev_priv->wq,
2827 &dev_priv->hotplug_work);
2828
2829 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2830 I915_READ(PORT_HOTPLUG_STAT);
2831 }
2832
21ad8330 2833 I915_WRITE(IIR, iir & ~flip_mask);
a266c7d5
CW
2834 new_iir = I915_READ(IIR); /* Flush posted writes */
2835
a266c7d5
CW
2836 if (iir & I915_USER_INTERRUPT)
2837 notify_ring(dev, &dev_priv->ring[RCS]);
2838 if (iir & I915_BSD_USER_INTERRUPT)
2839 notify_ring(dev, &dev_priv->ring[VCS]);
2840
a266c7d5 2841 for_each_pipe(pipe) {
2c8ba29f 2842 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
90a72f87
VS
2843 i915_handle_vblank(dev, pipe, pipe, iir))
2844 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
a266c7d5
CW
2845
2846 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2847 blc_event = true;
2848 }
2849
2850
2851 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2852 intel_opregion_asle_intr(dev);
2853
515ac2bb
DV
2854 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2855 gmbus_irq_handler(dev);
2856
a266c7d5
CW
2857 /* With MSI, interrupts are only generated when iir
2858 * transitions from zero to nonzero. If another bit got
2859 * set while we were handling the existing iir bits, then
2860 * we would never get another interrupt.
2861 *
2862 * This is fine on non-MSI as well, as if we hit this path
2863 * we avoid exiting the interrupt handler only to generate
2864 * another one.
2865 *
2866 * Note that for MSI this could cause a stray interrupt report
2867 * if an interrupt landed in the time between writing IIR and
2868 * the posting read. This should be rare enough to never
2869 * trigger the 99% of 100,000 interrupts test for disabling
2870 * stray interrupts.
2871 */
2872 iir = new_iir;
2873 }
2874
d05c617e 2875 i915_update_dri1_breadcrumb(dev);
2c8ba29f 2876
a266c7d5
CW
2877 return ret;
2878}
2879
2880static void i965_irq_uninstall(struct drm_device * dev)
2881{
2882 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2883 int pipe;
2884
2885 if (!dev_priv)
2886 return;
2887
adca4730
CW
2888 I915_WRITE(PORT_HOTPLUG_EN, 0);
2889 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
a266c7d5
CW
2890
2891 I915_WRITE(HWSTAM, 0xffffffff);
2892 for_each_pipe(pipe)
2893 I915_WRITE(PIPESTAT(pipe), 0);
2894 I915_WRITE(IMR, 0xffffffff);
2895 I915_WRITE(IER, 0x0);
2896
2897 for_each_pipe(pipe)
2898 I915_WRITE(PIPESTAT(pipe),
2899 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
2900 I915_WRITE(IIR, I915_READ(IIR));
2901}
2902
f71d4af4
JB
2903void intel_irq_init(struct drm_device *dev)
2904{
8b2e326d
CW
2905 struct drm_i915_private *dev_priv = dev->dev_private;
2906
2907 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
99584db3 2908 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
c6a828d3 2909 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
a4da4fa4 2910 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
8b2e326d 2911
99584db3
DV
2912 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
2913 i915_hangcheck_elapsed,
61bac78e
DV
2914 (unsigned long) dev);
2915
97a19a24 2916 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
9ee32fea 2917
f71d4af4
JB
2918 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2919 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
7d4e146f 2920 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
f71d4af4
JB
2921 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2922 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2923 }
2924
c3613de9
KP
2925 if (drm_core_check_feature(dev, DRIVER_MODESET))
2926 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
2927 else
2928 dev->driver->get_vblank_timestamp = NULL;
f71d4af4
JB
2929 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2930
7e231dbe
JB
2931 if (IS_VALLEYVIEW(dev)) {
2932 dev->driver->irq_handler = valleyview_irq_handler;
2933 dev->driver->irq_preinstall = valleyview_irq_preinstall;
2934 dev->driver->irq_postinstall = valleyview_irq_postinstall;
2935 dev->driver->irq_uninstall = valleyview_irq_uninstall;
2936 dev->driver->enable_vblank = valleyview_enable_vblank;
2937 dev->driver->disable_vblank = valleyview_disable_vblank;
fa00abe0 2938 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4a06e201 2939 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
f71d4af4
JB
2940 /* Share pre & uninstall handlers with ILK/SNB */
2941 dev->driver->irq_handler = ivybridge_irq_handler;
2942 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2943 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2944 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2945 dev->driver->enable_vblank = ivybridge_enable_vblank;
2946 dev->driver->disable_vblank = ivybridge_disable_vblank;
2947 } else if (HAS_PCH_SPLIT(dev)) {
2948 dev->driver->irq_handler = ironlake_irq_handler;
2949 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2950 dev->driver->irq_postinstall = ironlake_irq_postinstall;
2951 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2952 dev->driver->enable_vblank = ironlake_enable_vblank;
2953 dev->driver->disable_vblank = ironlake_disable_vblank;
2954 } else {
c2798b19
CW
2955 if (INTEL_INFO(dev)->gen == 2) {
2956 dev->driver->irq_preinstall = i8xx_irq_preinstall;
2957 dev->driver->irq_postinstall = i8xx_irq_postinstall;
2958 dev->driver->irq_handler = i8xx_irq_handler;
2959 dev->driver->irq_uninstall = i8xx_irq_uninstall;
a266c7d5
CW
2960 } else if (INTEL_INFO(dev)->gen == 3) {
2961 dev->driver->irq_preinstall = i915_irq_preinstall;
2962 dev->driver->irq_postinstall = i915_irq_postinstall;
2963 dev->driver->irq_uninstall = i915_irq_uninstall;
2964 dev->driver->irq_handler = i915_irq_handler;
20afbda2 2965 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
c2798b19 2966 } else {
a266c7d5
CW
2967 dev->driver->irq_preinstall = i965_irq_preinstall;
2968 dev->driver->irq_postinstall = i965_irq_postinstall;
2969 dev->driver->irq_uninstall = i965_irq_uninstall;
2970 dev->driver->irq_handler = i965_irq_handler;
20afbda2 2971 dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
c2798b19 2972 }
f71d4af4
JB
2973 dev->driver->enable_vblank = i915_enable_vblank;
2974 dev->driver->disable_vblank = i915_disable_vblank;
2975 }
2976}
20afbda2
DV
2977
2978void intel_hpd_init(struct drm_device *dev)
2979{
2980 struct drm_i915_private *dev_priv = dev->dev_private;
2981
2982 if (dev_priv->display.hpd_irq_setup)
2983 dev_priv->display.hpd_irq_setup(dev);
2984}
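/*
 * Typical driver-load ordering (a sketch, not code from this file): the
 * vfunc table that intel_irq_init() fills in is consumed by the DRM core
 * when the interrupt is actually installed, and intel_hpd_init() is meant
 * to run afterwards so that hpd_irq_setup() programs the hotplug registers
 * with interrupts already enabled. Roughly:
 *
 *	intel_irq_init(dev);
 *	drm_irq_install(dev);	- invokes the irq_preinstall/postinstall hooks
 *	intel_hpd_init(dev);
 */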