/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#define MAX_NOPID ((u32)~0)

/*
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX					\
	(I915_ASLE_INTERRUPT |						\
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |				\
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |				\
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |			\
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |			\
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)
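
/*
 * Helpers for the display-engine interrupt mask (DEIMR) and the per-pipe
 * PIPESTAT registers: each one updates the cached mask kept in dev_priv and
 * then writes it back out to the hardware register.
 */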
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
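
/*
 * PIPESTAT keeps the interrupt-enable bits in its high half and the matching
 * status bits in its low half; writing a status bit as 1 clears it, which is
 * why enabling an event also writes (mask >> 16) to acknowledge stale status.
 */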
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
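
/* g4x and newer parts expose a dedicated hardware frame counter per pipe,
 * so none of the high/low stitching done above is needed here.
 */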
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(pipe));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
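
/* DRM hook for vblank timestamping: look up the crtc for the pipe and let
 * the DRM core derive the timestamp from the scanout position reported by
 * i915_get_crtc_scanoutpos().
 */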
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags, crtc);
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
static void i915_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay = dev_priv->cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->min_delay)
			new_delay = dev_priv->min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->cur_delay = new_delay;

	return;
}
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno;

	if (ring->obj == NULL)
		return;

	seqno = ring->get_seqno(ring);
	trace_i915_gem_request_complete(ring, seqno);

	ring->irq_seqno = seqno;
	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->hangcheck_count = 0;
		mod_timer(&dev_priv->hangcheck_timer,
			  jiffies +
			  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
	}
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps_work);
	u8 new_delay = dev_priv->cur_delay;
	u32 pm_iir, pm_imr;

	spin_lock_irq(&dev_priv->rps_lock);
	pm_iir = dev_priv->pm_iir;
	dev_priv->pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps_lock);

	if (!pm_iir)
		return;

	mutex_lock(&dev_priv->dev->struct_mutex);
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
		gen6_gt_force_wake_get(dev_priv);
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->min_delay) {
			new_delay = dev_priv->min_delay;
			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
				   ((new_delay << 16) & 0x3f0000));
		} else {
			/* Make sure we continue to get down interrupts
			 * until we hit the minimum frequency */
			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
		}
		gen6_gt_force_wake_put(dev_priv);
	}

	gen6_set_rps(dev_priv->dev, new_delay);
	dev_priv->cur_delay = new_delay;

	/*
	 * rps_lock not held here because clearing is non-destructive. There is
	 * an *extremely* unlikely race with gen6_rps_enable() that is prevented
	 * by holding struct_mutex for the duration of the write.
	 */
	mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}
}
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by rps_work.
	 */

	spin_lock_irqsave(&dev_priv->rps_lock, flags);
	WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
	dev_priv->pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps_work);
}
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];
	u32 vblank_status;
	bool blc_event = false;

	atomic_inc(&dev_priv->irq_received);

	vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
		PIPE_VBLANK_INTERRUPT_STATUS;

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
			drm_handle_vblank(dev, 0);
			if (!dev_priv->flip_pending_is_done) {
				intel_finish_page_flip(dev, 0);
			}
		}

		if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
			drm_handle_vblank(dev, 1);
			if (!dev_priv->flip_pending_is_done) {
				intel_finish_page_flip(dev, 1);
			}
		}

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
static void pch_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 pch_iir;
	int pipe;

	pch_iir = I915_READ(SDEIIR);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
	struct drm_i915_master_private *master_priv;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
		goto done;

	ret = IRQ_HANDLED;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}

	snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK_IVB)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK_IVB)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT_IVB) {
		if (pch_iir & SDE_HOTPLUG_MASK_CPT)
			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
		pch_irq_handler(dev);
	}

	if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	/* should clear PCH hotplug event before clear CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
	u32 hotplug_mask;
	struct drm_i915_master_private *master_priv;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
	    (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	if (HAS_PCH_CPT(dev))
		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
	else
		hotplug_mask = SDE_HOTPLUG_MASK;

	ret = IRQ_HANDLED;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		if (pch_iir & hotplug_mask)
			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
		pch_irq_handler(dev);
	}

	if (de_iir & DE_PCU_EVENT) {
		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
		i915_handle_rps_change(dev);
	}

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	/* should clear PCH hotplug event before clear CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
		if (!i915_reset(dev, GRDOM_RENDER)) {
			atomic_set(&dev_priv->mm.wedged, 0);
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
		}
		complete_all(&dev_priv->error_completion);
	}
}
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int page, page_count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	page_count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (page = 0; page < page_count; page++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			void *s;

			drm_clflush_pages(&src->pages[page], 1);

			s = kmap_atomic(src->pages[page]);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&src->pages[page], 1);
		}
		local_irq_restore(flags);

		dst->pages[page] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (page--)
		kfree(dst->pages[page]);
	kfree(dst);
	return NULL;
}
static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
static void
i915_error_state_free(struct drm_device *dev,
		      struct drm_i915_error_state *error)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}
static u32 capture_bo_list(struct drm_i915_error_buffer *err,
			   int count,
			   struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		err->size = obj->base.size;
		err->name = obj->base.name;
		err->seqno = obj->last_rendering_seqno;
		err->gtt_offset = obj->gtt_offset;
		err->read_domains = obj->base.read_domains;
		err->write_domain = obj->base.write_domain;
		err->fence_reg = obj->fence_reg;
		err->pinned = 0;
		if (obj->pin_count > 0)
			err->pinned = 1;
		if (obj->user_pin_count > 0)
			err->pinned = -1;
		err->tiling = obj->tiling_mode;
		err->dirty = obj->dirty;
		err->purgeable = obj->madv != I915_MADV_WILLNEED;
		err->ring = obj->ring ? obj->ring->id : -1;
		err->cache_level = obj->cache_level;

		if (++i == count)
			break;

		err++;
	}

	return i;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	}
}
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	seqno = ring->get_seqno(ring);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS) {
			error->instdone1 = I915_READ(INSTDONE1);
			error->bbaddr = I915_READ64(BB_ADDR);
		}
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_ring_buffer *ring = &dev_priv->ring[i];

		if (ring->obj == NULL)
			continue;

		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_bo_list(error->active_bo,
					error->active_bo_count,
					&dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_bo_list(error->pinned_bo,
					error->pinned_bo_count,
					&dev_priv->mm.pinned_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(dev, error);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(dev, error);
}
#else
#define i915_capture_error_state(x)
#endif
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	int pipe;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		INIT_COMPLETION(dev_priv->error_completion);
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		wake_up_all(&dev_priv->ring[RCS].irq_queue);
		if (HAS_BSD(dev))
			wake_up_all(&dev_priv->ring[VCS].irq_queue);
		if (HAS_BLT(dev))
			wake_up_all(&dev_priv->ring[BCS].irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL || work->pending || !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	u32 vblank_status;
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	bool blc_event = false;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	if (INTEL_INFO(dev)->gen >= 4)
		vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
	else
		vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 0);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 0);
		}

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 1);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 1);
		}

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & vblank_status &&
			    drm_handle_vblank(dev, pipe)) {
				if (!dev_priv->flip_pending_is_done) {
					i915_pageflip_stall_check(dev, pipe);
					intel_finish_page_flip(dev, pipe);
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->counter;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	if (ring->irq_get(ring)) {
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}
/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 dpfl, imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dpfl = I915_READ(VLV_DPFLIPSTAT);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0) {
		dpfl |= PIPEA_VBLANK_INT_EN;
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	} else {
		dpfl |= PIPEB_VBLANK_INT_EN;
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	}
	I915_WRITE(VLV_DPFLIPSTAT, dpfl);
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM,
			   INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 dpfl, imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dpfl = I915_READ(VLV_DPFLIPSTAT);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0) {
		dpfl &= ~PIPEA_VBLANK_INT_EN;
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	} else {
		dpfl &= ~PIPEB_VBLANK_INT_EN;
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	}
	I915_WRITE(VLV_IMR, imr);
	I915_WRITE(VLV_DPFLIPSTAT, dpfl);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}
/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
				  ring->name,
				  ring->waiting_seqno,
				  ring->get_seqno(ring));
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}

	return false;
}
static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}
static bool i915_hangcheck_hung(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->hangcheck_count++ > 1) {
		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);

		if (!IS_GEN2(dev)) {
			/* Is the chip hanging on a WAIT_FOR_EVENT?
			 * If so we can simply poke the RB_WAIT bit
			 * and break the hang. This should work on
			 * all but the second generation chipsets.
			 */
			if (kick_ring(&dev_priv->ring[RCS]))
				return false;

			if (HAS_BSD(dev) && kick_ring(&dev_priv->ring[VCS]))
				return false;

			if (HAS_BLT(dev) && kick_ring(&dev_priv->ring[BCS]))
				return false;
		}

		return true;
	}

	return false;
}
/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
	bool err = false;

	if (!i915_enable_hangcheck)
		return;

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
		if (err) {
			if (i915_hangcheck_hung(dev))
				return;

			goto repeat;
		}

		dev_priv->hangcheck_count = 0;
		return;
	}

	if (INTEL_INFO(dev)->gen < 4) {
		instdone = I915_READ(INSTDONE);
		instdone1 = 0;
	} else {
		instdone = I915_READ(INSTDONE_I965);
		instdone1 = I915_READ(INSTDONE1);
	}
	acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
	acthd_bsd = HAS_BSD(dev) ?
		intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
	acthd_blt = HAS_BLT(dev) ?
		intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;

	if (dev_priv->last_acthd == acthd &&
	    dev_priv->last_acthd_bsd == acthd_bsd &&
	    dev_priv->last_acthd_blt == acthd_blt &&
	    dev_priv->last_instdone == instdone &&
	    dev_priv->last_instdone1 == instdone1) {
		if (i915_hangcheck_hung(dev))
			return;
	} else {
		dev_priv->hangcheck_count = 0;

		dev_priv->last_acthd = acthd;
		dev_priv->last_acthd_bsd = acthd_bsd;
		dev_priv->last_acthd_blt = acthd_blt;
		dev_priv->last_instdone = instdone;
		dev_priv->last_instdone1 = instdone1;
	}

repeat:
	/* Reset timer case chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
	if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
		INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */
static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32	hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_irqs;
	u32 hotplug_mask;

	DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
	if (HAS_BSD(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
	if (HAS_BLT(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
	dev_priv->irq_mask = ~display_mask;

	/* should always can generate irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	if (HAS_PCH_CPT(dev)) {
		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
				SDE_PORTB_HOTPLUG_CPT |
				SDE_PORTC_HOTPLUG_CPT |
				SDE_PORTD_HOTPLUG_CPT);
	} else {
		hotplug_mask = (SDE_CRT_HOTPLUG |
				SDE_PORTB_HOTPLUG |
				SDE_PORTC_HOTPLUG |
				SDE_PORTD_HOTPLUG |
				SDE_AUX_MASK);
	}

	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}
static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
			   DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
			   DE_PLANEB_FLIP_DONE_IVB;
	u32 render_irqs;
	u32 hotplug_mask;

	DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
	DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
	DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		      GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
			SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT);
	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	return 0;
}
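
/*
 * Valleyview keeps the GT registers but replaces the DE/SDE display path
 * with VLV_IIR/VLV_IMR/VLV_IER, gated by a top-level VLV_MASTER_IER enable.
 */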
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	u32 render_irqs;
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		       I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->irq_mask = ~enable_mask;

	DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
	DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
	DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	/* Hack for broken MSIs on VLV */
	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
	pci_read_config_word(dev->pdev, 0x98, &msid);
	msid &= 0xff; /* mask out delivery bits */
	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
		      GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BLT_USER_INTERRUPT |
		      GT_GEN6_BSD_USER_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT |
		      GT_USER_INTERRUPT;

	dev_priv->gt_irq_mask = ~render_irqs;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0);
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
#if 0 /* FIXME: check register definitions; some have moved */
	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}
#endif

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	return 0;
}
static void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
}
/*
 * Must be called after intel_modeset_init or hotplug interrupts won't be
 * enabled correctly.
 */
static int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
	u32 error_mask;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		/* Note HDMI and DP share bits */
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;

			/* Programming the CRT detection parameters tends
			 * to generate a spurious hotplug event about three
			 * seconds later.  So just do it once.
			 */
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}
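
/*
 * The uninstall hooks undo the postinstall programming: mask and disable
 * every source again and ack whatever is still pending in the IIR/PIPESTAT
 * registers, so a later re-install starts from a clean slate.
 */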
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	dev_priv->vblank_pipe = 0;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->vblank_pipe = 0;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}
static void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	dev_priv->vblank_pipe = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}
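
/*
 * Gen2 interrupt handling: the plane flip-pending bits are deliberately
 * left out of the IIR ack (see flip_mask) until the vblank that completes
 * the page flip has been handled; only then are they dropped from
 * flip_mask and acked on a later pass through the loop.
 */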
static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 0)) {
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 0);
				intel_finish_page_flip(dev, 0);
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
			}
		}

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 1)) {
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 1);
				intel_finish_page_flip(dev, 1);
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
			}
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	dev_priv->vblank_pipe = 0;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
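
/*
 * intel_irq_init() only picks the right set of hooks for the chipset; the
 * DRM core calls irq_preinstall/irq_postinstall/irq_uninstall and the
 * handler itself once the interrupt line is actually requested.
 */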
void intel_irq_init(struct drm_device *dev)
{
	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev) ||
	    IS_VALLEYVIEW(dev)) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
	} else if (IS_IVYBRIDGE(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else {
			dev->driver->irq_preinstall = i915_driver_irq_preinstall;
			dev->driver->irq_postinstall = i915_driver_irq_postinstall;
			dev->driver->irq_uninstall = i915_driver_irq_uninstall;
			dev->driver->irq_handler = i915_driver_irq_handler;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}