/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}
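
/*
 * Illustrative sketch (not part of the driver): callers are expected to
 * guard reads of pipe-relative registers with i915_pipe_enabled(), exactly
 * as the vblank-counter helpers below do. read_pipe_reg_safely() and its
 * reg parameter are hypothetical stand-ins for any pipe-gated register
 * access.
 */
#if 0
static u32 read_pipe_reg_safely(struct drm_device *dev, int pipe, u32 reg)
{
	if (!i915_pipe_enabled(dev, pipe))
		return 0;	/* reading now could hang the chip */

	return I915_READ(reg);
}
#endif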
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
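
/*
 * Illustrative sketch (not part of the driver): the retry loop above is
 * the classic way to read a counter whose high and low halves live in
 * separate, unsynchronized registers. read_high()/read_low() below are
 * hypothetical stand-ins for the two register reads.
 */
#if 0
static u64 read_split_counter(void)
{
	u32 hi1, hi2, lo;

	do {
		hi1 = read_high();	/* may be stale ... */
		lo  = read_low();	/* ... if lo wraps right here */
		hi2 = read_high();	/* re-read to detect the wrap */
	} while (hi1 != hi2);		/* retry until hi was stable */

	return ((u64)hi1 << 32) | lo;
}
#endif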
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(pipe));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
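
/*
 * Worked example for the pre-gen4 branch above (hypothetical numbers):
 * with htotal = 1056 and a raw pixelcount of 265000, vpos = 265000 / 1056
 * = 250 and hpos = 265000 - 250 * 1056 = 1000, i.e. the beam is on line
 * 250, pixel 1000 of the current frame.
 */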
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
static void i915_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay = dev_priv->cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->min_delay)
			new_delay = dev_priv->min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->cur_delay = new_delay;
}
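
/*
 * Note the inverted sense above: on Ironlake a smaller "delay" value
 * means a higher frequency, so stepping toward max_delay subtracts 1 and
 * the clamps use < and > accordingly. A minimal sketch (not part of the
 * driver) of the same step-and-clamp pattern with conventional ordering;
 * all names here are hypothetical:
 */
#if 0
static u8 step_toward(u8 cur, u8 lo, u8 hi, bool up)
{
	int next = up ? cur + 1 : cur - 1;

	if (next < lo)			/* clamp to the allowed band */
		next = lo;
	if (next > hi)
		next = hi;
	return next;
}
#endif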
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno;

	if (ring->obj == NULL)
		return;

	seqno = ring->get_seqno(ring);
	trace_i915_gem_request_complete(ring, seqno);

	ring->irq_seqno = seqno;
	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->hangcheck_count = 0;
		mod_timer(&dev_priv->hangcheck_timer,
			  jiffies +
			  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
	}
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps_work);
	u32 pm_iir, pm_imr;
	u8 new_delay = dev_priv->cur_delay;

	spin_lock_irq(&dev_priv->rps_lock);
	pm_iir = dev_priv->pm_iir;
	dev_priv->pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps_lock);

	if (!pm_iir)
		return;

	mutex_lock(&dev_priv->dev->struct_mutex);
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
		gen6_gt_force_wake_get(dev_priv);
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->min_delay) {
			new_delay = dev_priv->min_delay;
			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
				   ((new_delay << 16) & 0x3f0000));
		} else {
			/* Make sure we continue to get down interrupts
			 * until we hit the minimum frequency */
			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
		}
		gen6_gt_force_wake_put(dev_priv);
	}

	gen6_set_rps(dev_priv->dev, new_delay);
	dev_priv->cur_delay = new_delay;

	/*
	 * rps_lock not held here because clearing is non-destructive. There is
	 * an *extremely* unlikely race with gen6_rps_enable() that is prevented
	 * by holding struct_mutex for the duration of the write.
	 */
	mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}
}
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * catches a case where we've unsafely cleared
	 * dev_priv->pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it indicates a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by rps_work.
	 */

	spin_lock_irqsave(&dev_priv->rps_lock, flags);
	WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
	dev_priv->pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps_work);
}
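
/*
 * Illustrative sketch (not part of the driver) of the mask-and-defer
 * protocol used above: the hard-irq path latches the event bits and masks
 * them in IMR so they cannot re-fire, and the work handler
 * (gen6_pm_rps_work above) later consumes the latched bits and clears IMR
 * again. write_imr() and handle_event() are hypothetical stand-ins.
 */
#if 0
static void example_irq_path(u32 iir_bits)
{
	spin_lock(&example_lock);
	example_pending |= iir_bits;	/* latch what fired */
	write_imr(example_pending);	/* mask it until the work runs */
	spin_unlock(&example_lock);
	queue_work(example_wq, &example_work);
}

static void example_work_fn(struct work_struct *work)
{
	u32 bits;

	spin_lock_irq(&example_lock);
	bits = example_pending;
	example_pending = 0;
	write_imr(0);			/* unmask for the next event */
	spin_unlock_irq(&example_lock);

	handle_event(bits);		/* may sleep; we left irq context */
}
#endif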
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];
	u32 vblank_status;
	bool blc_event = false;

	atomic_inc(&dev_priv->irq_received);

	vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
		PIPE_VBLANK_INTERRUPT_STATUS;

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
			drm_handle_vblank(dev, 0);
			intel_finish_page_flip(dev, 0);
		}

		if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
			drm_handle_vblank(dev, 1);
			intel_finish_page_flip(dev, 1);
		}

		for_each_pipe(pipe)
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
static void pch_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 pch_iir;
	int pipe;

	pch_iir = I915_READ(SDEIIR);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	irqreturn_t ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
	struct drm_i915_master_private *master_priv;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
		goto done;

	ret = IRQ_HANDLED;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}

	snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK_IVB)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK_IVB)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT_IVB) {
		if (pch_iir & SDE_HOTPLUG_MASK_CPT)
			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
		pch_irq_handler(dev);
	}

	if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	/* should clear the PCH hotplug event before clearing the CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	irqreturn_t ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
	u32 hotplug_mask;
	struct drm_i915_master_private *master_priv;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
	    (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	if (HAS_PCH_CPT(dev))
		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
	else
		hotplug_mask = SDE_HOTPLUG_MASK;

	ret = IRQ_HANDLED;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		if (pch_iir & hotplug_mask)
			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
		pch_irq_handler(dev);
	}

	if (de_iir & DE_PCU_EVENT) {
		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
		i915_handle_rps_change(dev);
	}

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	/* should clear the PCH hotplug event before clearing the CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
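
/*
 * Illustrative sketch (not part of the driver) of the pattern the two
 * handlers above share: gate the master interrupt off, snapshot all IIR
 * sources, handle them, write the snapshots back to ack exactly what was
 * seen, then restore the master enable. read_iir()/ack_iir() and friends
 * are hypothetical stand-ins for the register accessors.
 */
#if 0
static irqreturn_t example_shared_handler(void)
{
	u32 master = read_master_enable();
	irqreturn_t ret = IRQ_NONE;
	u32 iir;

	write_master_enable(master & ~MASTER_ENABLE);	/* gate new irqs */

	iir = read_iir();		/* snapshot pending sources */
	if (iir) {
		handle_sources(iir);	/* act on the snapshot only */
		ack_iir(iir);		/* ack exactly what we saw */
		ret = IRQ_HANDLED;
	}

	write_master_enable(master);	/* re-open the gate */
	return ret;
}
#endif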
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
		if (!i915_reset(dev, GRDOM_RENDER)) {
			atomic_set(&dev_priv->mm.wedged, 0);
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
		}
		complete_all(&dev_priv->error_completion);
	}
}
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int page, page_count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	page_count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (page = 0; page < page_count; page++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			void *s;

			drm_clflush_pages(&src->pages[page], 1);

			s = kmap_atomic(src->pages[page]);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&src->pages[page], 1);
		}
		local_irq_restore(flags);

		dst->pages[page] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (page--)
		kfree(dst->pages[page]);
	kfree(dst);
	return NULL;
}
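
/*
 * Illustrative sketch (not part of the driver): the unwind label above is
 * the usual partial-allocation cleanup shape. On failure midway through
 * the loop, free everything allocated so far in reverse, then the
 * container. alloc_chunks() is a hypothetical stand-alone example.
 */
#if 0
static void **alloc_chunks(int n)
{
	void **chunks = kmalloc(n * sizeof(*chunks), GFP_ATOMIC);
	int i;

	if (chunks == NULL)
		return NULL;

	for (i = 0; i < n; i++) {
		chunks[i] = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (chunks[i] == NULL)
			goto unwind;
	}
	return chunks;

unwind:
	while (i--)		/* free only what was allocated, in reverse */
		kfree(chunks[i]);
	kfree(chunks);
	return NULL;
}
#endif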
static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void
i915_error_state_free(struct drm_device *dev,
		      struct drm_i915_error_state *error)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->seqno = obj->last_rendering_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
		/* fall through: gen3 also has the gen2 fence registers */
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	}
}
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	seqno = ring->get_seqno(ring);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS) {
			error->instdone1 = I915_READ(INSTDONE1);
			error->bbaddr = I915_READ64(BB_ADDR);
		}
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_ring_buffer *ring = &dev_priv->ring[i];

		if (ring->obj == NULL)
			continue;

		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.gtt_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(dev, error);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(dev, error);
}
#else
#define i915_capture_error_state(x)
#endif
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	int pipe;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		INIT_COMPLETION(dev_priv->error_completion);
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		wake_up_all(&dev_priv->ring[RCS].irq_queue);
		if (HAS_BSD(dev))
			wake_up_all(&dev_priv->ring[VCS].irq_queue);
		if (HAS_BLT(dev))
			wake_up_all(&dev_priv->ring[BCS].irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL || work->pending || !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->counter;
}
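
/*
 * Illustrative sketch (not part of the driver) of the breadcrumb scheme
 * used above and in i915_wait_irq() below: the GPU stores a monotonically
 * increasing counter to a known status-page slot and raises an interrupt;
 * the CPU then waits for the stored value to catch up.
 * read_breadcrumb()/wait_for_irq_or_timeout() are hypothetical stand-ins
 * for READ_BREADCRUMB() and the wait-queue machinery.
 */
#if 0
static int wait_breadcrumb(u32 wanted)
{
	/* The counter wraps from 0x7FFFFFFF back to 1 (see above), so the
	 * simple >= comparison is only valid between wraps. */
	while (read_breadcrumb() < wanted)
		wait_for_irq_or_timeout();
	return 0;
}
#endif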
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	if (ring->irq_get(ring)) {
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}
/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 dpfl, imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dpfl = I915_READ(VLV_DPFLIPSTAT);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0) {
		dpfl |= PIPEA_VBLANK_INT_EN;
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	} else {
		dpfl |= PIPEB_VBLANK_INT_EN;
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	}
	I915_WRITE(VLV_DPFLIPSTAT, dpfl);
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 dpfl, imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dpfl = I915_READ(VLV_DPFLIPSTAT);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0) {
		dpfl &= ~PIPEA_VBLANK_INT_EN;
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	} else {
		dpfl &= ~PIPEB_VBLANK_INT_EN;
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	}
	I915_WRITE(VLV_IMR, imr);
	I915_WRITE(VLV_DPFLIPSTAT, dpfl);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	/* We don't check whether the ring even exists before calling this
	 * function. Hence check whether it's initialized. */
	if (ring->obj == NULL)
		return true;

	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  ring->name);
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}
	return false;
}
static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}

static bool i915_hangcheck_hung(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->hangcheck_count++ > 1) {
		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);

		if (!IS_GEN2(dev)) {
			/* Is the chip hanging on a WAIT_FOR_EVENT?
			 * If so we can simply poke the RB_WAIT bit
			 * and break the hang. This should work on
			 * all but the second generation chipsets.
			 */
			if (kick_ring(&dev_priv->ring[RCS]))
				return false;

			if (HAS_BSD(dev) && kick_ring(&dev_priv->ring[VCS]))
				return false;

			if (HAS_BLT(dev) && kick_ring(&dev_priv->ring[BCS]))
				return false;
		}

		return true;
	}

	return false;
}
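
/*
 * Illustrative sketch (not part of the driver): the hangcheck policy above
 * is "two strikes" - the first elapsed timer with no progress only bumps
 * hangcheck_count; the second declares a hang, and even then a stuck
 * WAIT_FOR_EVENT is kicked before the full reset path runs. All names in
 * the sketch are hypothetical.
 */
#if 0
static bool example_two_strike_check(int *count, bool progressed)
{
	if (progressed) {
		*count = 0;		/* any progress resets the strikes */
		return false;
	}
	return (*count)++ > 1;		/* hung only on repeated stalls */
}
#endif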
/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
	bool err = false;

	if (!i915_enable_hangcheck)
		return;

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
		if (err) {
			if (i915_hangcheck_hung(dev))
				return;

			goto repeat;
		}

		dev_priv->hangcheck_count = 0;
		return;
	}

	if (INTEL_INFO(dev)->gen < 4) {
		instdone = I915_READ(INSTDONE);
		instdone1 = 0;
	} else {
		instdone = I915_READ(INSTDONE_I965);
		instdone1 = I915_READ(INSTDONE1);
	}
	acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
	acthd_bsd = HAS_BSD(dev) ?
		intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
	acthd_blt = HAS_BLT(dev) ?
		intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;

	if (dev_priv->last_acthd == acthd &&
	    dev_priv->last_acthd_bsd == acthd_bsd &&
	    dev_priv->last_acthd_blt == acthd_blt &&
	    dev_priv->last_instdone == instdone &&
	    dev_priv->last_instdone1 == instdone1) {
		if (i915_hangcheck_hung(dev))
			return;
	} else {
		dev_priv->hangcheck_count = 0;

		dev_priv->last_acthd = acthd;
		dev_priv->last_acthd_bsd = acthd_bsd;
		dev_priv->last_acthd_blt = acthd_blt;
		dev_priv->last_instdone = instdone;
		dev_priv->last_instdone1 = instdone1;
	}

repeat:
	/* Reset timer in case chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
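
/*
 * Illustrative sketch (not part of the driver) of the watchdog shape used
 * above: periodically sample a set of progress registers, declare a hang
 * only when an entire sample matches the previous one, then re-arm the
 * timer. sample_progress()/declare_hang()/rearm_timer() are hypothetical
 * stand-ins for the ACTHD/INSTDONE reads and the recovery path.
 */
#if 0
static void example_watchdog(struct example_state *st)
{
	u64 now = sample_progress();

	if (now == st->last_sample) {
		if (st->stalls++ > 1)		/* no movement twice in a row */
			declare_hang();
	} else {
		st->stalls = 0;			/* progress: reset the count */
		st->last_sample = now;
	}
	rearm_timer();
}
#endif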
/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */
static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	if (HAS_PCH_CPT(dev)) {
		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
				SDE_PORTB_HOTPLUG_CPT |
				SDE_PORTC_HOTPLUG_CPT |
				SDE_PORTD_HOTPLUG_CPT);
	} else {
		hotplug_mask = (SDE_CRT_HOTPLUG |
				SDE_PORTB_HOTPLUG |
				SDE_PORTC_HOTPLUG |
				SDE_PORTD_HOTPLUG |
				SDE_AUX_MASK);
	}

	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}
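
/*
 * Illustrative sketch (not part of the driver) of the postinstall
 * sequence the functions above and below all follow for each interrupt
 * bank: ack anything stale in IIR first, then program the mask (IMR),
 * and only then enable sources in IER, finishing with a posting read to
 * flush the writes. The accessors are hypothetical stand-ins.
 */
#if 0
	write_iir(read_iir());		/* ack stale events */
	write_imr(~wanted_mask);	/* unmask only what we handle */
	write_ier(wanted_mask);		/* enable the sources */
	(void)read_ier();		/* posting read to flush the writes */
#endif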
static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
		DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
			SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT);
	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	return 0;
}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 render_irqs;
	u32 enable_mask;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->irq_mask = ~enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	/* Hack for broken MSIs on VLV */
	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
	pci_read_config_word(dev->pdev, 0x98, &msid);
	msid &= 0xff; /* mask out delivery bits */
	msid |= (1<<14);
	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
		GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		GT_GEN6_BLT_USER_INTERRUPT |
		GT_GEN6_BSD_USER_INTERRUPT |
		GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
		GT_PIPE_NOTIFY |
		GT_RENDER_CS_ERROR_INTERRUPT |
		GT_SYNC_STATUS |
		GT_USER_INTERRUPT;

	dev_priv->gt_irq_mask = ~render_irqs;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0);
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
#if 0 /* FIXME: check register definitions; some have moved */
	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}
#endif

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	return 0;
}
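/*
 * The PCI config-space pokes in the "broken MSIs" hack above appear to
 * retarget the device's MSI address at the conventional x86 LAPIC window
 * (0xfee00000) and patch the delivery bits at offset 0x98; the exact
 * layout of those VLV config registers is not documented here.
 */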
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}
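/*
 * Teardown mirrors setup in reverse for each block: mask every source in
 * *IMR, zero the *IER enables, then ack whatever is still latched in
 * *IIR so nothing stays pending if the handler is later reinstalled.
 */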
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}
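/*
 * Gen2 exposes the interrupt registers (IMR/IER/IIR) as 16-bit registers,
 * which is why the i8xx paths use the I915_WRITE16/I915_READ16/
 * POSTING_READ16 accessors where later generations do 32-bit accesses.
 */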
static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 0)) {
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 0);
				intel_finish_page_flip(dev, 0);
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
			}
		}

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 1)) {
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 1);
				intel_finish_page_flip(dev, 1);
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
			}
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
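/*
 * The flip_mask dance above is deliberate: plane "flip pending" bits are
 * neither acked in IIR nor allowed to keep the loop alive until the
 * matching pipe's vblank completes the flip, at which point the bit is
 * dropped from flip_mask and the still-latched IIR bit gets acked on the
 * next pass.
 */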
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	dev_priv->vblank_pipe = 0;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}
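/*
 * dev_priv->hotplug_supported_mask checked above is presumably populated
 * by the encoder setup code as outputs are registered, so detection is
 * only armed for ports that actually exist on the board.
 */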
static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				if (iir & flip[plane]) {
					intel_prepare_page_flip(dev, plane);
					intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}

	return ret;
}
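/*
 * PIPESTAT is read and written back under irq_lock because the register
 * mixes enable bits (16..30) with status bits (0..15, plus FIFO underrun
 * in bit 31, hence the 0x8000ffff mask above); writing the read value
 * back acks exactly the latched status bits without touching the enables.
 */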
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	dev_priv->vblank_pipe = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		/* Note HDMI and DP share bits */
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;

			/* Programming the CRT detection parameters tends
			   to generate a spurious hotplug event about three
			   seconds later.  So just do it once.
			*/
			if (IS_G4X(dev))
				hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}
static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 0);

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 1);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				i915_pageflip_stall_check(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}

	return ret;
}
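/*
 * Unlike the i915 handler above, the i965 path completes page flips off
 * PIPE_START_VBLANK_INTERRUPT_STATUS and additionally runs
 * i915_pageflip_stall_check() as a safety net for flips whose completion
 * event was missed.
 */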
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
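/*
 * intel_irq_init() below only installs the per-platform hooks; the DRM
 * core drives them: drm_irq_install() calls irq_preinstall, requests the
 * interrupt line with irq_handler, then calls irq_postinstall, and
 * drm_irq_uninstall() invokes irq_uninstall on teardown.
 */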
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev) ||
	    IS_VALLEYVIEW(dev)) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
	} else if (IS_IVYBRIDGE(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			/* IIR "flip pending" means done if this bit is set */
			I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}