i915: Add support for MSI and interrupt mitigation.
drivers/gpu/drm/i915/i915_irq.c
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define MAX_NOPID ((u32)~0)

/** These are the interrupts used by the driver */
#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT |                  \
                                    I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | \
                                    I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)

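/* IMR is a mask register: a set bit blocks delivery of the corresponding
 * interrupt. The dummy IMR readback below flushes the posted register
 * write, so the new mask is in effect before the caller proceeds.
 */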
static inline void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask_reg & mask) != 0) {
                dev_priv->irq_mask_reg &= ~mask;
                I915_WRITE(IMR, dev_priv->irq_mask_reg);
                (void) I915_READ(IMR);
        }
}

static inline void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask_reg & mask) != mask) {
                dev_priv->irq_mask_reg |= mask;
                I915_WRITE(IMR, dev_priv->irq_mask_reg);
                (void) I915_READ(IMR);
        }
}

/**
 * Emit blits for scheduled buffer swaps.
 *
 * This function will be called with the HW lock held.
 */
static void i915_vblank_tasklet(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
        struct list_head *list, *tmp, hits, *hit;
        int nhits, nrects, slice[2], upper[2], lower[2], i;
        unsigned counter[2] = { atomic_read(&dev->vbl_received),
                                atomic_read(&dev->vbl_received2) };
        struct drm_drawable_info *drw;
        drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
        u32 cpp = dev_priv->cpp;
        u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
                                XY_SRC_COPY_BLT_WRITE_ALPHA |
                                XY_SRC_COPY_BLT_WRITE_RGB)
                             : XY_SRC_COPY_BLT_CMD;
        u32 src_pitch = sarea_priv->pitch * cpp;
        u32 dst_pitch = sarea_priv->pitch * cpp;
        u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
        RING_LOCALS;

        if (IS_I965G(dev) && sarea_priv->front_tiled) {
                cmd |= XY_SRC_COPY_BLT_DST_TILED;
                dst_pitch >>= 2;
        }
        if (IS_I965G(dev) && sarea_priv->back_tiled) {
                cmd |= XY_SRC_COPY_BLT_SRC_TILED;
                src_pitch >>= 2;
        }

        DRM_DEBUG("\n");

        INIT_LIST_HEAD(&hits);

        nhits = nrects = 0;

        spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

        /* Find buffer swaps scheduled for this vertical blank */
        list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
                drm_i915_vbl_swap_t *vbl_swap =
                        list_entry(list, drm_i915_vbl_swap_t, head);

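                /* The vblank counter is unsigned and wraps; a difference of
                 * more than 2^23 frames means the target sequence is still
                 * in the future, so leave the swap queued for a later vblank.
                 */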
                if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
                        continue;

                list_del(list);
                dev_priv->swaps_pending--;

                spin_unlock(&dev_priv->swaps_lock);
                spin_lock(&dev->drw_lock);

                drw = drm_get_drawable_info(dev, vbl_swap->drw_id);

                if (!drw) {
                        spin_unlock(&dev->drw_lock);
                        drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
                        spin_lock(&dev_priv->swaps_lock);
                        continue;
                }

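                /* Insert into the hits list sorted by ascending top scanline
                 * so the slice loop below emits blits top to bottom.
                 */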
                list_for_each(hit, &hits) {
                        drm_i915_vbl_swap_t *swap_cmp =
                                list_entry(hit, drm_i915_vbl_swap_t, head);
                        struct drm_drawable_info *drw_cmp =
                                drm_get_drawable_info(dev, swap_cmp->drw_id);

                        if (drw_cmp &&
                            drw_cmp->rects[0].y1 > drw->rects[0].y1) {
                                list_add_tail(list, hit);
                                break;
                        }
                }

                spin_unlock(&dev->drw_lock);

                /* List of hits was empty, or we reached the end of it */
                if (hit == &hits)
                        list_add_tail(list, hits.prev);

                nhits++;

                spin_lock(&dev_priv->swaps_lock);
        }

        if (nhits == 0) {
                spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
                return;
        }

        spin_unlock(&dev_priv->swaps_lock);

        i915_kernel_lost_context(dev);

        if (IS_I965G(dev)) {
                BEGIN_LP_RING(4);

                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING(0);
                OUT_RING(((sarea_priv->width - 1) & 0xffff) |
                         ((sarea_priv->height - 1) << 16));
                OUT_RING(0);
                ADVANCE_LP_RING();
        } else {
                BEGIN_LP_RING(6);

                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(0);
                OUT_RING(0);
                OUT_RING(sarea_priv->width | sarea_priv->height << 16);
                OUT_RING(sarea_priv->width | sarea_priv->height << 16);
                OUT_RING(0);

                ADVANCE_LP_RING();
        }

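        /* The blits below use screen coordinates, so the drawing rectangle
         * was reset above to cover the full screen; record that the kernel
         * now owns the ring context.
         */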
        sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;

        upper[0] = upper[1] = 0;
        slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
        slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
        lower[0] = sarea_priv->pipeA_y + slice[0];
        lower[1] = sarea_priv->pipeB_y + slice[1];

        spin_lock(&dev->drw_lock);

        /* Emit blits for buffer swaps, partitioning both outputs into as many
         * slices as there are buffer swaps scheduled in order to avoid tearing
         * (based on the assumption that a single buffer swap would always
         * complete before scanout starts).
         */
        for (i = 0; i++ < nhits;
             upper[0] = lower[0], lower[0] += slice[0],
             upper[1] = lower[1], lower[1] += slice[1]) {
                if (i == nhits)
                        lower[0] = lower[1] = sarea_priv->height;

                list_for_each(hit, &hits) {
                        drm_i915_vbl_swap_t *swap_hit =
                                list_entry(hit, drm_i915_vbl_swap_t, head);
                        struct drm_clip_rect *rect;
                        int num_rects, pipe;
                        unsigned short top, bottom;

                        drw = drm_get_drawable_info(dev, swap_hit->drw_id);

                        if (!drw)
                                continue;

                        rect = drw->rects;
                        pipe = swap_hit->pipe;
                        top = upper[pipe];
                        bottom = lower[pipe];

                        for (num_rects = drw->num_rects; num_rects--; rect++) {
                                int y1 = max(rect->y1, top);
                                int y2 = min(rect->y2, bottom);

                                if (y1 >= y2)
                                        continue;

                                BEGIN_LP_RING(8);

                                OUT_RING(cmd);
                                OUT_RING(ropcpp | dst_pitch);
                                OUT_RING((y1 << 16) | rect->x1);
                                OUT_RING((y2 << 16) | rect->x2);
                                OUT_RING(sarea_priv->front_offset);
                                OUT_RING((y1 << 16) | rect->x1);
                                OUT_RING(src_pitch);
                                OUT_RING(sarea_priv->back_offset);

                                ADVANCE_LP_RING();
                        }
                }
        }

        spin_unlock_irqrestore(&dev->drw_lock, irqflags);

        list_for_each_safe(hit, tmp, &hits) {
                drm_i915_vbl_swap_t *swap_hit =
                        list_entry(hit, drm_i915_vbl_swap_t, head);

                list_del(hit);

                drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
        }
}

irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 pipea_stats, pipeb_stats;
        u32 iir;

        pipea_stats = I915_READ(PIPEASTAT);
        pipeb_stats = I915_READ(PIPEBSTAT);

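        /* MSI is edge-signalled, so mask everything in IMR while sampling
         * and acknowledging IIR; an interrupt condition that arrives in the
         * meantime then raises a fresh message once the mask is restored,
         * instead of being lost.
         */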
        if (dev->pdev->msi_enabled)
                I915_WRITE(IMR, ~0);
        iir = I915_READ(IIR);

        DRM_DEBUG("iir=%08x\n", iir);

        if (iir == 0) {
                if (dev->pdev->msi_enabled) {
                        I915_WRITE(IMR, dev_priv->irq_mask_reg);
                        (void) I915_READ(IMR);
                }
                return IRQ_NONE;
        }

        I915_WRITE(IIR, iir);
        if (dev->pdev->msi_enabled)
                I915_WRITE(IMR, dev_priv->irq_mask_reg);
        (void) I915_READ(IIR); /* Flush posted writes */

        dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

        if (iir & I915_USER_INTERRUPT)
                DRM_WAKEUP(&dev_priv->irq_queue);

        if (iir & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
                   I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
                int vblank_pipe = dev_priv->vblank_pipe;

                if ((vblank_pipe &
                     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
                    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
                        if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
                                atomic_inc(&dev->vbl_received);
                        if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
                                atomic_inc(&dev->vbl_received2);
                } else if (((iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
                            (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
                           ((iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
                            (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
                        atomic_inc(&dev->vbl_received);

                DRM_WAKEUP(&dev->vbl_queue);
                drm_vbl_send_signals(dev);

                if (dev_priv->swaps_pending > 0)
                        drm_locked_tasklet(dev, i915_vblank_tasklet);
                I915_WRITE(PIPEASTAT,
                           pipea_stats | I915_VBLANK_INTERRUPT_ENABLE |
                           PIPE_VBLANK_INTERRUPT_STATUS);
                I915_WRITE(PIPEBSTAT,
                           pipeb_stats | I915_VBLANK_INTERRUPT_ENABLE |
                           PIPE_VBLANK_INTERRUPT_STATUS);
        }

        return IRQ_HANDLED;
}

static int i915_emit_irq(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;

        i915_kernel_lost_context(dev);

        DRM_DEBUG("\n");

        dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

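        /* Emit a breadcrumb: store the new counter at dword 5 of the
         * hardware status page (the slot READ_BREADCRUMB samples), then
         * raise a user interrupt once the store has executed.
         */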
        BEGIN_LP_RING(6);
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        OUT_RING(0);
        OUT_RING(MI_USER_INTERRUPT);
        ADVANCE_LP_RING();

        return dev_priv->counter;
}

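/* The user interrupt is refcounted: the IMR bit is only touched on the
 * 0 <-> 1 refcount transitions, so concurrent waiters share a single
 * hardware enable.
 */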
static void i915_user_irq_get(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        spin_lock(&dev_priv->user_irq_lock);
        if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
                i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        spin_unlock(&dev_priv->user_irq_lock);
}

static void i915_user_irq_put(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        spin_lock(&dev_priv->user_irq_lock);
        BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
        if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
                i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        spin_unlock(&dev_priv->user_irq_lock);
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = 0;

        DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));

        if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
                dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
                return 0;
        }

        dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

        i915_user_irq_get(dev);
        DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
                    READ_BREADCRUMB(dev_priv) >= irq_nr);
        i915_user_irq_put(dev);

        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
        }

        dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
        return ret;
}

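/* Wait up to three seconds for the requested vblank sequence to pass,
 * using the same 2^23 window as the swap code to cope with counter
 * wraparound.
 */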
static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
                                      atomic_t *counter)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned int cur_vblank;
        int ret = 0;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
                    (((cur_vblank = atomic_read(counter))
                      - *sequence) <= (1<<23)));

        *sequence = cur_vblank;

        return ret;
}

int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
{
        return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
}

int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
{
        return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_emit_t *emit = data;
        int result;

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        result = i915_emit_irq(dev);

        if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }

        return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_wait_t *irqwait = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        return i915_wait_irq(dev, irqwait->irq_seq);
}

/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t *pipe = data;
        u32 enable_mask = 0, disable_mask = 0;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
                DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
                return -EINVAL;
        }

        if (pipe->pipe & DRM_I915_VBLANK_PIPE_A)
                enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
        else
                disable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;

        if (pipe->pipe & DRM_I915_VBLANK_PIPE_B)
                enable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
        else
                disable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

        i915_enable_irq(dev_priv, enable_mask);
        i915_disable_irq(dev_priv, disable_mask);

        dev_priv->vblank_pipe = pipe->pipe;

        return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t *pipe = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        /* IMR bits are masks (set == disabled), so reading IMR here would
         * invert the answer; report the pipes recorded by
         * i915_vblank_pipe_set instead.
         */
        pipe->pipe = dev_priv->vblank_pipe;

        return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_swap_t *swap = data;
        drm_i915_vbl_swap_t *vbl_swap;
        unsigned int pipe, seqtype, curseq;
        unsigned long irqflags;
        struct list_head *list;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __func__);
                return -EINVAL;
        }

        if (dev_priv->sarea_priv->rotation) {
                DRM_DEBUG("Rotation not supported\n");
                return -EINVAL;
        }

        if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
                              _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
                DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
                return -EINVAL;
        }

        pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;

        seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);

        if (!(dev_priv->vblank_pipe & (1 << pipe))) {
                DRM_ERROR("Invalid pipe %d\n", pipe);
                return -EINVAL;
        }

        spin_lock_irqsave(&dev->drw_lock, irqflags);

        if (!drm_get_drawable_info(dev, swap->drawable)) {
                spin_unlock_irqrestore(&dev->drw_lock, irqflags);
                DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
                return -EINVAL;
        }

        spin_unlock_irqrestore(&dev->drw_lock, irqflags);

        curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);

        if (seqtype == _DRM_VBLANK_RELATIVE)
                swap->sequence += curseq;

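        /* If the target sequence has already passed (same 2^23 wraparound
         * window as the tasklet), either slip the swap to the next vblank
         * (_DRM_VBLANK_NEXTONMISS) or report the miss.
         */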
        if ((curseq - swap->sequence) <= (1<<23)) {
                if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
                        swap->sequence = curseq + 1;
                } else {
                        DRM_DEBUG("Missed target sequence\n");
                        return -EINVAL;
                }
        }

        spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

        list_for_each(list, &dev_priv->vbl_swaps.head) {
                vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);

                if (vbl_swap->drw_id == swap->drawable &&
                    vbl_swap->pipe == pipe &&
                    vbl_swap->sequence == swap->sequence) {
                        spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
                        DRM_DEBUG("Already scheduled\n");
                        return 0;
                }
        }

        spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

        if (dev_priv->swaps_pending >= 100) {
                DRM_DEBUG("Too many swaps queued\n");
                return -EBUSY;
        }

        vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);

        if (!vbl_swap) {
                DRM_ERROR("Failed to allocate memory to queue swap\n");
                return -ENOMEM;
        }

        DRM_DEBUG("\n");

        vbl_swap->drw_id = swap->drawable;
        vbl_swap->pipe = pipe;
        vbl_swap->sequence = swap->sequence;

        spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

        list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
        dev_priv->swaps_pending++;

        spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

        return 0;
}

/* drm_dma.h hooks
 */
void i915_driver_irq_preinstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

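        /* Quiesce the chip before the handler is hooked up: HWSTAM 0xfffe
         * masks status-page reporting for everything except bit 0, and a
         * zero IER keeps all interrupt sources disabled until postinstall.
         */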
        I915_WRITE(HWSTAM, 0xfffe);
        I915_WRITE(IMR, 0x0);
        I915_WRITE(IER, 0x0);
}

void i915_driver_irq_postinstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        spin_lock_init(&dev_priv->swaps_lock);
        INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
        dev_priv->swaps_pending = 0;

        if (!dev_priv->vblank_pipe)
                dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;

        /* Set initial unmasked IRQs to just the selected vblank pipes. */
        dev_priv->irq_mask_reg = ~0;
        if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
                dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
        if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
                dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

        I915_WRITE(IMR, dev_priv->irq_mask_reg);
        I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
        (void) I915_READ(IER);

        DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
}

void i915_driver_irq_uninstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 temp;

        if (!dev_priv)
                return;

        I915_WRITE(HWSTAM, 0xffff);
        I915_WRITE(IMR, 0xffff);
        I915_WRITE(IER, 0x0);

        /* Acknowledge any interrupt conditions still latched in IIR. */
        temp = I915_READ(IIR);
        I915_WRITE(IIR, temp);
}