/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
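/*
 * Note on i915_get_vblank_counter(): the hardware splits the frame
 * counter across two registers, so the final value is assembled as
 * (high1 << 8) | low.  Reading the high register twice and retrying
 * until both reads agree guards against the low byte wrapping (and
 * carrying into the high bits) between the two reads.
 */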
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
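/*
 * Readout convention for i915_get_crtc_scanoutpos(): positions inside
 * the vblank region at the top of the frame are reported as negative
 * (the "*vpos = *vpos - vtotal" correction above), so a caller can
 * tell at a glance both that it is in vblank and how many lines remain
 * before active scanout starts.
 */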
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);
}
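/*
 * Note the inverted scale used by the ironlake RPS code above: a
 * numerically *smaller* delay value means a *higher* performance
 * state, so ips.max_delay is the smallest value new_delay may reach
 * and ips.min_delay the largest.  Rising busyness decrements
 * new_delay towards ips.max_delay; falling busyness increments it
 * towards ips.min_delay.
 */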
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->gpu_error.hangcheck_count = 0;
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}
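/*
 * The rps.pm_iir handoff works in two stages: the interrupt path
 * masks the event in GEN6_PMIMR and accumulates the IIR bits into
 * dev_priv->rps.pm_iir (see gen6_queue_rps_work() below), and the
 * work item above latches and clears that accumulator under rps.lock
 * before unmasking GEN6_PMIMR again.  The frequency change itself is
 * done under rps.hw_lock because it can sleep, which is why it cannot
 * run from the interrupt handler directly.
 */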
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
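/*
 * The parity handling above follows a common top/bottom-half pattern:
 * the hard irq path (ivybridge_handle_parity_error) masks the L3
 * parity interrupt in GTIMR and queues the work item, and
 * ivybridge_parity_work() re-enables the interrupt only after the
 * error registers have been read and cleared.  That keeps a
 * misbehaving L3 bank from storming the CPU with interrupts while the
 * (sleeping) uevent path runs.
 */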
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
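/*
 * PIPESTAT layout, as used above and in i915_enable_pipestat(): the
 * low half of the register holds status bits and the upper half the
 * corresponding enable bits (hence the enable helpers preserving
 * 0x7fff0000 and setting "mask | (mask >> 16)").  The 0x8000ffff test
 * in valleyview_irq_handler() therefore covers all status bits plus
 * the FIFO underrun status in the top bit; writing the value back
 * acks exactly the events that were observed.
 */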
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK_CPT)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}
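/*
 * On Ivybridge the per-pipe display bits repeat every 5 bit positions,
 * which is what the "DE_PIPEA_VBLANK_IVB << (5 * i)" arithmetic in
 * ivybridge_irq_handler() exploits: one loop covers vblank and flip
 * done for pipes A, B and C instead of three copies of the same code.
 */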
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	struct intel_ring_buffer *ring;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };
	int i, ret;

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
				   reset_event);

		ret = i915_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev.kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set(&error->reset_counter, I915_WEDGED);
		}

		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);

		intel_display_handle_reset(dev);

		wake_up_all(&dev_priv->gpu_error.reset_queue);
	}
}
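/*
 * A note on gpu_error.reset_counter as used above, assuming the usual
 * encoding in i915_drv.h: the low bit is the reset-in-progress flag
 * set by i915_handle_error(), so the atomic_inc() after a successful
 * reset both clears that flag and bumps the generation count that
 * waiters compare against, while atomic_set(..., I915_WEDGED) marks
 * the GPU terminally wedged instead.
 */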
/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch(INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), \
				       (src)->base.size>>PAGE_SHIFT)
static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	default:
		BUG();
	}
}
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->private;
		if (acthd >= obj->gtt_offset &&
		    acthd < obj->gtt_offset + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}
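/*
 * The i915_seqno_passed() checks above handle 32-bit seqno wraparound;
 * the helper (in i915_drv.h) effectively computes a signed difference,
 * something like:
 *
 *	static inline bool i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 *	{
 *		return (int32_t)(seq1 - seq2) >= 0;
 *	}
 *
 * so a seqno that has wrapped past zero still compares as "after" one
 * issued just before the wrap.
 */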
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
			ering->ctx = i915_error_object_create_sized(dev_priv,
								    obj, 1);
		}
	}
}
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);


		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
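/*
 * i915_gem_record_rings() runs in atomic context (the error interrupt
 * or hangcheck path), hence the GFP_ATOMIC allocation and the
 * two-pass walk of request_list above: one pass to size the array,
 * one to fill it, with no sleeping in between.
 */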
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work item
		 * doesn't deadlock trying to grab various locks.
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
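/*
 * Valleyview gates vblanks at two levels, as seen above: the
 * first-level VLV_IMR bit for the pipe event and the second-level
 * PIPESTAT enable bit.  Both must be open for a vblank interrupt to
 * reach the handler, so enabling clears the IMR bit and sets the
 * PIPESTAT enable, and valleyview_disable_vblank() below does the
 * reverse.
 */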
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring, false),
			      ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  ring->name);
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}
	return false;
}
static bool semaphore_passed(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
	struct intel_ring_buffer *signaller;
	u32 cmd, ipehr, acthd_min;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if ((ipehr & ~(0x3 << 16)) !=
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
		return false;

	/* ACTHD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX.
	 */
	acthd_min = max((int)acthd - 3 * 4, 0);
	do {
		cmd = ioread32(ring->virtual_start + acthd);
		if (cmd == ipehr)
			break;

		acthd -= 4;
		if (acthd < acthd_min)
			return false;
	} while (1);

	signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
	return i915_seqno_passed(signaller->get_seqno(signaller, false),
				 ioread32(ring->virtual_start+acthd+4)+1);
}
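/*
 * semaphore_passed() above decodes a stuck MI_SEMAPHORE_MBOX wait:
 * IPEHR holds the offending command, the backwards scan locates that
 * command in the ring so the dword after it (the seqno being waited
 * for) can be read, and the "(ring->id + ...) % 3" arithmetic maps
 * the mbox select bit onto whichever of the other two rings is
 * supposed to signal.  If the signaller has already passed that seqno
 * the wait will never complete on its own and the ring can safely be
 * kicked.
 */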
static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}

	if (INTEL_INFO(dev)->gen >= 6 &&
	    tmp & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(ring)) {
		DRM_ERROR("Kicking stuck semaphore on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}
static bool i915_hangcheck_hung(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->gpu_error.hangcheck_count++ > 1) {
		bool hung = true;

		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);

		if (!IS_GEN2(dev)) {
			struct intel_ring_buffer *ring;
			int i;

			/* Is the chip hanging on a WAIT_FOR_EVENT?
			 * If so we can simply poke the RB_WAIT bit
			 * and break the hang. This should work on
			 * all but the second generation chipsets.
			 */
			for_each_ring(ring, dev_priv, i)
				hung &= !kick_ring(ring);
		}

		return hung;
	}

	return false;
}
/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
	struct intel_ring_buffer *ring;
	bool err = false, idle;
	int i;

	if (!i915_enable_hangcheck)
		return;

	memset(acthd, 0, sizeof(acthd));
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		idle &= i915_hangcheck_ring_idle(ring, &err);
		acthd[i] = intel_ring_get_active_head(ring);
	}

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (idle) {
		if (err) {
			if (i915_hangcheck_hung(dev))
				return;

			goto repeat;
		}

		dev_priv->gpu_error.hangcheck_count = 0;
		return;
	}

	i915_get_extra_instdone(dev, instdone);
	if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
		   sizeof(acthd)) == 0 &&
	    memcmp(dev_priv->gpu_error.prev_instdone, instdone,
		   sizeof(instdone)) == 0) {
		if (i915_hangcheck_hung(dev))
			return;
	} else {
		dev_priv->gpu_error.hangcheck_count = 0;

		memcpy(dev_priv->gpu_error.last_acthd, acthd,
		       sizeof(acthd));
		memcpy(dev_priv->gpu_error.prev_instdone, instdone,
		       sizeof(instdone));
	}

repeat:
	/* Reset timer in case chip hangs without another request being added */
	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
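/*
 * Hangcheck state machine, in short: each timer tick samples per-ring
 * ACTHD plus the INSTDONE registers.  If every ring is idle the
 * counter is reset (or, if a stuck waiter was found, escalated via
 * i915_hangcheck_hung()).  If the samples match the previous tick the
 * GPU made no progress and hangcheck_count is incremented, declaring
 * a hang on the second stall in a row; any progress resets the count
 * and re-arms the timer.
 */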
/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */
static void ibx_enable_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static void ibx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 mask;

	if (HAS_PCH_IBX(dev))
		mask = SDE_HOTPLUG_MASK |
		       SDE_GMBUS |
		       SDE_AUX_MASK;
	else
		mask = SDE_HOTPLUG_MASK_CPT |
		       SDE_GMBUS_CPT |
		       SDE_AUX_MASK_CPT;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
	I915_WRITE(SDEIER, mask);
	POSTING_READ(SDEIER);

	ibx_enable_hotplug(dev);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kind of interrupts that are always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
			   DE_AUX_CHANNEL_A;
	u32 render_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}
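
/*
 * Three independent IMR/IER pairs get programmed here: display engine
 * (DEIMR/DEIER), GT (GTIMR/GTIER) and, via ibx_irq_postinstall(), the
 * south display engine on the PCH. DE_MASTER_IRQ_CONTROL in DEIER acts
 * as the master gate for the whole chain.
 */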

static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kind of interrupts that are always enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB |
		DE_AUX_CHANNEL_A_IVB;
	u32 render_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u32 render_irqs;
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially; enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/* Hack for broken MSIs on VLV */
	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
	pci_read_config_word(dev->pdev, 0x98, &msid);
	msid &= 0xff; /* mask out delivery bits */
	msid |= (1 << 14);
	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
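
/*
 * The "broken MSI" hack above rewrites what appear to be the MSI address
 * (0x94) and data (0x98) words in VLV config space, pinning the upstream
 * write to the standard 0xfee00000 APIC window; the exact register layout
 * is not documented here, so treat the offsets as magic numbers.
 */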

static void valleyview_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTD_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}
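
/*
 * IMR masks which events get latched into IIR at all, while IER selects
 * which latched bits raise a CPU interrupt. The plane flip-pending bits
 * are unmasked in IMR but left out of IER on purpose: they then show up
 * in IIR/ISR for the flip-completion check in i8xx_handle_vblank() below
 * without generating interrupts of their own.
 */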

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int pipe, u16 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, pipe);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the PendingFlip bit
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 0, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 1, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
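
/*
 * PIPESTAT must be acked before IIR: the pipe-event bit in IIR stays
 * asserted for as long as any enabled PIPESTAT status bit is set, so
 * clearing IIR first would just see the bit latch again immediately.
 */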

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	intel_opregion_enable_asle(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
			hotplug_en |= PORTB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
			hotplug_en |= PORTC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
			hotplug_en |= PORTD_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the PendingFlip bit
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * a spurious one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
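
/*
 * The do/while above re-reads IIR immediately after clearing it and loops
 * until it is quiescent: MSI only fires on the 0 -> nonzero edge of IIR,
 * so a bit that became set during processing has to be handled now or it
 * would never raise another interrupt.
 */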

static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

	/*
	 * Enable some error detection; note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	intel_opregion_enable_asle(dev);

	return 0;
}
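
/*
 * EMR is the error *mask* register: a 0 bit lets that error condition be
 * latched (via EIR) and reported as an error interrupt, which the handlers
 * forward to i915_handle_error(). The instruction-error bit stays masked
 * because it is reserved on these parts.
 */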

static void i965_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;

	/* Note HDMI and DP share hotplug bits */
	hotplug_en = 0;
	if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTD_HOTPLUG_INT_EN;
	if (IS_G4X(dev)) {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	} else {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	}
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;

		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later. So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	/* Ignore TV since it's buggy */

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * a spurious one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
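
/*
 * intel_irq_init() only fills in the per-generation vtable; the DRM core
 * later calls the chosen irq_preinstall/irq_postinstall/irq_handler/
 * irq_uninstall hooks around request_irq(), and hpd_irq_setup is invoked
 * from intel_hpd_init() below once the outputs are known.
 */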

void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
}