/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	dev_priv->render_ring.status_page.page_addr
		= dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);

	if (INTEL_INFO(dev)->gen >= 4)
		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
					     0xf0;

	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
	return 0;
}

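/*
 * Editor's note (summary of the status-page split in this file): chips for
 * which I915_NEED_GFX_HWS() is true (G33-class and newer) cannot use a plain
 * physical page here; they expect the status page to live in graphics memory,
 * with its GTT offset written to HWS_PGA. The legacy/UMS path for those parts
 * is i915_set_status_page() further down; this helper only serves the older
 * chipsets that want a real physical address.
 */
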
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (dev_priv->render_ring.status_page.gfx_addr) {
		dev_priv->render_ring.status_page.gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = &dev_priv->render_ring;

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	intel_cleanup_ring_buffer(&dev_priv->render_ring);
	intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
	intel_cleanup_ring_buffer(&dev_priv->blt_ring);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (dev_priv->render_ring.gem_object != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		dev_priv->render_ring.size = init->ring_size;

		dev_priv->render_ring.map.offset = init->ring_start;
		dev_priv->render_ring.map.size = init->ring_size;
		dev_priv->render_ring.map.type = 0;
		dev_priv->render_ring.map.flags = 0;
		dev_priv->render_ring.map.mtrr = 0;

		drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);

		if (dev_priv->render_ring.map.handle == NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("can not ioremap virtual address for"
				  " ring buffer\n");
			return -ENOMEM;
		}
	}

	dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	struct intel_ring_buffer *ring;
	DRM_DEBUG_DRIVER("%s\n", __func__);

	ring = &dev_priv->render_ring;

	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

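/*
 * Editor's note, with an illustrative (made-up) dword: 0x54f00006 has
 * (cmd >> 29) == 0x2, so it is treated as a 2D command of length
 * (cmd & 0xff) + 2 = 8 dwords; validate_cmd() returns 8 and the caller's scan
 * resumes 8 dwords further on. A return of 0 anywhere rejects the whole
 * buffer.
 */
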
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *boxes,
	      int i, int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_clip_rect box = boxes[i];
	int ret;

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cliprects, i,
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

static int i915_quiescent(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return intel_wait_ring_buffer(&dev_priv->render_ring,
				      dev_priv->render_ring.size - 8);
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			 batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_HAS_BSD:
		value = HAS_BSD(dev);
		break;
	case I915_PARAM_HAS_BLT:
		value = HAS_BLT(dev);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}

static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = &dev_priv->render_ring;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}

	ring->status_page.page_addr = dev_priv->hws_map.handle;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

896 intel_alloc_mchbar_resource(struct drm_device
*dev
)
898 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
899 int reg
= INTEL_INFO(dev
)->gen
>= 4 ? MCHBAR_I965
: MCHBAR_I915
;
900 u32 temp_lo
, temp_hi
= 0;
904 if (INTEL_INFO(dev
)->gen
>= 4)
905 pci_read_config_dword(dev_priv
->bridge_dev
, reg
+ 4, &temp_hi
);
906 pci_read_config_dword(dev_priv
->bridge_dev
, reg
, &temp_lo
);
907 mchbar_addr
= ((u64
)temp_hi
<< 32) | temp_lo
;
909 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
912 pnp_range_reserved(mchbar_addr
, mchbar_addr
+ MCHBAR_SIZE
))
916 /* Get some space for it */
917 dev_priv
->mch_res
.name
= "i915 MCHBAR";
918 dev_priv
->mch_res
.flags
= IORESOURCE_MEM
;
919 ret
= pci_bus_alloc_resource(dev_priv
->bridge_dev
->bus
,
921 MCHBAR_SIZE
, MCHBAR_SIZE
,
923 0, pcibios_align_resource
,
924 dev_priv
->bridge_dev
);
926 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret
);
927 dev_priv
->mch_res
.start
= 0;
931 if (INTEL_INFO(dev
)->gen
>= 4)
932 pci_write_config_dword(dev_priv
->bridge_dev
, reg
+ 4,
933 upper_32_bits(dev_priv
->mch_res
.start
));
935 pci_write_config_dword(dev_priv
->bridge_dev
, reg
,
936 lower_32_bits(dev_priv
->mch_res
.start
));
/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

#define PTE_ADDRESS_MASK		0xfffff000
#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
#define PTE_MAPPING_TYPE_DCACHE		(1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED		(3 << 1)
#define PTE_MAPPING_TYPE_MASK		(3 << 1)
#define PTE_VALID			(1 << 0)

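/*
 * Editor's note: a GTT PTE here packs a physical page address (with four
 * extra high address bits on i915+), a mapping type, and a valid bit.  As an
 * illustrative decode of a hypothetical entry 0x12345031:
 *   phys page = (entry & PTE_ADDRESS_MASK)
 *             | ((u64)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4))
 *             = 0x12345000 | 0x300000000 = 0x312345000
 *   map type  = entry & PTE_MAPPING_TYPE_MASK  -> 0 (uncached)
 *   valid     = entry & PTE_VALID              -> 1
 * i915_gtt_to_phys() below walks exactly this layout.
 */
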
/**
 * i915_gtt_to_phys - take a GTT address and turn it into a physical one
 * @dev: drm device
 * @gtt_addr: address to translate
 *
 * Some chip functions require allocations from stolen space but need the
 * physical address of the memory in question.  We use this routine
 * to get a physical address suitable for register programming from a given
 * GTT address.
 */
static unsigned long i915_gtt_to_phys(struct drm_device *dev,
				      unsigned long gtt_addr)
{
	unsigned long *gtt;
	unsigned long entry, phys;
	int gtt_bar = IS_GEN2(dev) ? 1 : 0;
	int gtt_offset, gtt_size;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
			gtt_offset = 2*1024*1024;
			gtt_size = 2*1024*1024;
		} else {
			gtt_offset = 512*1024;
			gtt_size = 512*1024;
		}
	} else {
		gtt_bar = 3;
		gtt_offset = 0;
		gtt_size = pci_resource_len(dev->pdev, gtt_bar);
	}

	gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
			 gtt_size);
	if (!gtt) {
		DRM_ERROR("ioremap of GTT failed\n");
		return 0;
	}

	entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));

	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);

	/* Mask out these reserved bits on this hardware. */
	if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
		entry &= ~PTE_ADDRESS_MASK_HIGH;

	/* If it's not a mapping type we know, then bail. */
	if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
	    (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
		iounmap(gtt);
		return 0;
	}

	/* If the PTE is not valid, bail */
	if (!(entry & PTE_VALID)) {
		DRM_ERROR("bad GTT entry in stolen space\n");
		iounmap(gtt);
		return 0;
	}

	iounmap(gtt);

	phys = (entry & PTE_ADDRESS_MASK) |
		((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));

	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);

	return phys;
}

static void i915_warn_stolen(struct drm_device *dev)
{
	DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
	DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}

static void i915_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
	unsigned long cfb_base;
	unsigned long ll_base = 0;

	/* Leave 1M for line length buffer & misc. */
	compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
	if (!compressed_fb) {
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		i915_warn_stolen(dev);
		return;
	}

	compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
	if (!compressed_fb) {
		i915_warn_stolen(dev);
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		return;
	}

	cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
	if (!cfb_base) {
		DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
		drm_mm_put_block(compressed_fb);
		return;
	}

	if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
		compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
						    4096, 0);
		if (!compressed_llb) {
			i915_warn_stolen(dev);
			return;
		}

		compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
		if (!compressed_llb) {
			i915_warn_stolen(dev);
			return;
		}

		ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
		if (!ll_base) {
			DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
			drm_mm_put_block(compressed_fb);
			drm_mm_put_block(compressed_llb);
			return;
		}
	}

	dev_priv->cfb_size = size;

	intel_disable_fbc(dev);
	dev_priv->compressed_fb = compressed_fb;
	if (IS_IRONLAKE_M(dev))
		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
	else if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
	} else {
		I915_WRITE(FBC_CFB_BASE, cfb_base);
		I915_WRITE(FBC_LL_BASE, ll_base);
		dev_priv->compressed_llb = compressed_llb;
	}

	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
		      ll_base, size >> 20);
}

static void i915_cleanup_compression(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_mm_put_block(dev_priv->compressed_fb);
	if (dev_priv->compressed_llb)
		drm_mm_put_block(dev_priv->compressed_llb);
}

/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "i915: switched on\n");
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
	} else {
		printk(KERN_ERR "i915: switched off\n");
		i915_suspend(dev, pmm);
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long prealloc_size, gtt_size, mappable_size;
	int ret = 0;

	prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
	gtt_size -= PAGE_SIZE;

	/* Basic memrange allocator for stolen space (aka mm.vram) */
	drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);

	/* Let GEM Manage from end of prealloc space to end of aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture.  One page should be enough to
	 * keep any prefetching inside of the aperture.
	 */
	i915_gem_do_init(dev, prealloc_size, mappable_size, gtt_size);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_init_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;

	/* Try to set up FBC with a reasonable compressed buffer size */
	if (I915_HAS_FBC(dev) && i915_powersave) {
		int cfb_size;

		/* Try to get an 8M buffer... */
		if (prealloc_size > (9*1024*1024))
			cfb_size = 8*1024*1024;
		else /* fall back to 7/8 of the stolen space */
			cfb_size = prealloc_size * 7 / 8;
		i915_setup_compression(dev, cfb_size);
	}

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret)
		goto cleanup_ringbuffer;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev,
					     i915_switcheroo_set_state,
					     i915_switcheroo_can_switch);
	if (ret)
		goto cleanup_vga_client;

	/* IIR "flip pending" bit means done if this bit is set */
	if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
		dev_priv->flip_pending_is_done = true;

	intel_modeset_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_irq;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return 0;

cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->c_m = 1;
	} else {
		dev_priv->c_m = 2;
	}
}

struct v_table {
	u8 vid;
	unsigned long vd; /* in .1 mil */
	unsigned long vm; /* in .1 mil */
	u8 pvid;
};

static struct v_table v_table[] = {
	{ 0, 16125, 15000, 0x7f, },
	{ 1, 16000, 14875, 0x7e, },
	{ 2, 15875, 14750, 0x7d, },
	{ 3, 15750, 14625, 0x7c, },
	{ 4, 15625, 14500, 0x7b, },
	{ 5, 15500, 14375, 0x7a, },
	{ 6, 15375, 14250, 0x79, },
	{ 7, 15250, 14125, 0x78, },
	{ 8, 15125, 14000, 0x77, },
	{ 9, 15000, 13875, 0x76, },
	{ 10, 14875, 13750, 0x75, },
	{ 11, 14750, 13625, 0x74, },
	{ 12, 14625, 13500, 0x73, },
	{ 13, 14500, 13375, 0x72, },
	{ 14, 14375, 13250, 0x71, },
	{ 15, 14250, 13125, 0x70, },
	{ 16, 14125, 13000, 0x6f, },
	{ 17, 14000, 12875, 0x6e, },
	{ 18, 13875, 12750, 0x6d, },
	{ 19, 13750, 12625, 0x6c, },
	{ 20, 13625, 12500, 0x6b, },
	{ 21, 13500, 12375, 0x6a, },
	{ 22, 13375, 12250, 0x69, },
	{ 23, 13250, 12125, 0x68, },
	{ 24, 13125, 12000, 0x67, },
	{ 25, 13000, 11875, 0x66, },
	{ 26, 12875, 11750, 0x65, },
	{ 27, 12750, 11625, 0x64, },
	{ 28, 12625, 11500, 0x63, },
	{ 29, 12500, 11375, 0x62, },
	{ 30, 12375, 11250, 0x61, },
	{ 31, 12250, 11125, 0x60, },
	{ 32, 12125, 11000, 0x5f, },
	{ 33, 12000, 10875, 0x5e, },
	{ 34, 11875, 10750, 0x5d, },
	{ 35, 11750, 10625, 0x5c, },
	{ 36, 11625, 10500, 0x5b, },
	{ 37, 11500, 10375, 0x5a, },
	{ 38, 11375, 10250, 0x59, },
	{ 39, 11250, 10125, 0x58, },
	{ 40, 11125, 10000, 0x57, },
	{ 41, 11000, 9875, 0x56, },
	{ 42, 10875, 9750, 0x55, },
	{ 43, 10750, 9625, 0x54, },
	{ 44, 10625, 9500, 0x53, },
	{ 45, 10500, 9375, 0x52, },
	{ 46, 10375, 9250, 0x51, },
	{ 47, 10250, 9125, 0x50, },
	{ 48, 10125, 9000, 0x4f, },
	{ 49, 10000, 8875, 0x4e, },
	{ 50, 9875, 8750, 0x4d, },
	{ 51, 9750, 8625, 0x4c, },
	{ 52, 9625, 8500, 0x4b, },
	{ 53, 9500, 8375, 0x4a, },
	{ 54, 9375, 8250, 0x49, },
	{ 55, 9250, 8125, 0x48, },
	{ 56, 9125, 8000, 0x47, },
	{ 57, 9000, 7875, 0x46, },
	{ 58, 8875, 7750, 0x45, },
	{ 59, 8750, 7625, 0x44, },
	{ 60, 8625, 7500, 0x43, },
	{ 61, 8500, 7375, 0x42, },
	{ 62, 8375, 7250, 0x41, },
	{ 63, 8250, 7125, 0x40, },
	{ 64, 8125, 7000, 0x3f, },
	{ 65, 8000, 6875, 0x3e, },
	{ 66, 7875, 6750, 0x3d, },
	{ 67, 7750, 6625, 0x3c, },
	{ 68, 7625, 6500, 0x3b, },
	{ 69, 7500, 6375, 0x3a, },
	{ 70, 7375, 6250, 0x39, },
	{ 71, 7250, 6125, 0x38, },
	{ 72, 7125, 6000, 0x37, },
	{ 73, 7000, 5875, 0x36, },
	{ 74, 6875, 5750, 0x35, },
	{ 75, 6750, 5625, 0x34, },
	{ 76, 6625, 5500, 0x33, },
	{ 77, 6500, 5375, 0x32, },
	{ 78, 6375, 5250, 0x31, },
	{ 79, 6250, 5125, 0x30, },
	{ 80, 6125, 5000, 0x2f, },
	{ 81, 6000, 4875, 0x2e, },
	{ 82, 5875, 4750, 0x2d, },
	{ 83, 5750, 4625, 0x2c, },
	{ 84, 5625, 4500, 0x2b, },
	{ 85, 5500, 4375, 0x2a, },
	{ 86, 5375, 4250, 0x29, },
	{ 87, 5250, 4125, 0x28, },
	{ 88, 5125, 4000, 0x27, },
	{ 89, 5000, 3875, 0x26, },
	{ 90, 4875, 3750, 0x25, },
	{ 91, 4750, 3625, 0x24, },
	{ 92, 4625, 3500, 0x23, },
	{ 93, 4500, 3375, 0x22, },
	{ 94, 4375, 3250, 0x21, },
	{ 95, 4250, 3125, 0x20, },
	{ 96, 4125, 3000, 0x1f, },
	{ 97, 4125, 3000, 0x1e, },
	{ 98, 4125, 3000, 0x1d, },
	{ 99, 4125, 3000, 0x1c, },
	{ 100, 4125, 3000, 0x1b, },
	{ 101, 4125, 3000, 0x1a, },
	{ 102, 4125, 3000, 0x19, },
	{ 103, 4125, 3000, 0x18, },
	{ 104, 4125, 3000, 0x17, },
	{ 105, 4125, 3000, 0x16, },
	{ 106, 4125, 3000, 0x15, },
	{ 107, 4125, 3000, 0x14, },
	{ 108, 4125, 3000, 0x13, },
	{ 109, 4125, 3000, 0x12, },
	{ 110, 4125, 3000, 0x11, },
	{ 111, 4125, 3000, 0x10, },
	{ 112, 4125, 3000, 0x0f, },
	{ 113, 4125, 3000, 0x0e, },
	{ 114, 4125, 3000, 0x0d, },
	{ 115, 4125, 3000, 0x0c, },
	{ 116, 4125, 3000, 0x0b, },
	{ 117, 4125, 3000, 0x0a, },
	{ 118, 4125, 3000, 0x09, },
	{ 119, 4125, 3000, 0x08, },
	{ 120, 1125, 0, 0x07, },
	{ 121, 1000, 0, 0x06, },
	{ 122, 875, 0, 0x05, },
	{ 123, 750, 0, 0x04, },
	{ 124, 625, 0, 0x03, },
	{ 125, 500, 0, 0x02, },
	{ 126, 375, 0, 0x01, },
	{ 127, 0, 0, 0x00, },
};

struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
};

static struct cparams cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

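/*
 * Editor's note: each cparams entry is selected by the (c_m, r_t) pair set up
 * in i915_ironlake_get_mem_freq(): 'i' matches the FSB-derived class c_m, 't'
 * the memory frequency, and 'm'/'c' are the slope and intercept that
 * i915_chipset_val() uses to turn an energy-counter delta into a power figure.
 */
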
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	diff1 = now - dev_priv->last_time1;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->last_count1) {
		diff = ~0UL - dev_priv->last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->c_m &&
		    cparams[i].t == dev_priv->r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->last_count1 = total_count;
	dev_priv->last_time1 = now;

	return ret;
}

unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}

static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	unsigned long val = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(v_table); i++) {
		if (v_table[i].pvid == pxvid) {
			if (IS_MOBILE(dev_priv->dev))
				val = v_table[i].vm;
			else
				val = v_table[i].vd;
		}
	}

	return val;
}

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	getrawmonotonic(&now);
	diff1 = timespec_sub(now, dev_priv->last_time2);

	/* Don't divide by 0 */
	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->last_count2) {
		diff = ~0UL - dev_priv->last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->last_count2;
	}

	dev_priv->last_count2 = count;
	dev_priv->last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->gfx_power = diff;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	i915_update_gfx_val(dev_priv);

	return dev_priv->gfx_power + state2;
}

/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static DEFINE_SPINLOCK(mchdev_lock);

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = i915_chipset_val(dev_priv);
	graphics_val = i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->max_delay > dev_priv->fmax)
		dev_priv->max_delay--;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->max_delay < dev_priv->min_delay)
		dev_priv->max_delay++;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = false;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	ret = dev_priv->busy;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->max_delay = dev_priv->fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
		ret = false;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	int ret = 0, mmio_bar;
	uint32_t agp_size, prealloc_size;
	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = (struct intel_device_info *) flags;

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	dev_priv->mm.gtt = intel_gtt_get();
	if (!dev_priv->mm.gtt) {
		DRM_ERROR("Failed to initialize GTT\n");
		ret = -ENODEV;
		goto out_rmmap;
	}

	prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	dev_priv->mm.gtt_mapping =
		io_mapping_create_wc(dev->agp->base, agp_size);
	if (dev_priv->mm.gtt_mapping == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present.  Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
					 agp_size,
					 MTRR_TYPE_WRCOMB, 1);
	if (dev_priv->mm.gtt_mtrr < 0) {
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and hangcheck.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time: max_active = 1 and NON_REENTRANT.
	 */
	dev_priv->wq = alloc_workqueue("i915",
				       WQ_UNBOUND | WQ_NON_REENTRANT,
				       1);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_iomapfree;
	}

	/* enable GEM by default */
	dev_priv->has_gem = 1;

	if (prealloc_size > agp_size * 3 / 4) {
		DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
			  "memory stolen.\n",
			  prealloc_size / 1024, agp_size / 1024);
		DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
			  "updating the BIOS to fix).\n");
		dev_priv->has_gem = 0;
	}

	if (dev_priv->has_gem == 0 &&
	    drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
		ret = -ENODEV;
		goto out_workqueue_free;
	}

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	/* Make sure the bios did its job and set up vital registers */
	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret)
			goto out_workqueue_free;
	}

	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	spin_lock_init(&dev_priv->user_irq_lock);
	spin_lock_init(&dev_priv->error_lock);
	dev_priv->trace_irq_seqno = 0;

	ret = drm_vblank_init(dev, I915_NUM_PIPE);

	if (ret) {
		(void) i915_driver_unload(dev);
		return ret;
	}

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	intel_detect_pch(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_workqueue_free;
		}
	}

	/* Must be done after probing outputs */
	intel_opregion_init(dev);
	acpi_video_register();

	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
		    (unsigned long) dev);

	spin_lock(&mchdev_lock);
	i915_mch_dev = dev_priv;
	dev_priv->mchdev_lock = &mchdev_lock;
	spin_unlock(&mchdev_lock);

	return 0;

out_workqueue_free:
	destroy_workqueue(dev_priv->wq);
out_iomapfree:
	io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	kfree(dev_priv);
	return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	spin_lock(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock(&mchdev_lock);

	if (dev_priv->mm.inactive_shrinker.shrink)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->child_dev && dev_priv->child_dev_num) {
			kfree(dev_priv->child_dev);
			dev_priv->child_dev = NULL;
			dev_priv->child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->hangcheck_timer);
	cancel_work_sync(&dev_priv->error_work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		i915_gem_free_all_phys_object(dev);

		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);
		if (I915_HAS_FBC(dev) && i915_powersave)
			i915_cleanup_compression(dev);
		drm_mm_takedown(&dev_priv->mm.vram);

		intel_cleanup_overlay(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->wq);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev->dev_private);

	return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");
	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_helper_restore();
		vga_switcheroo_process_delayed_switch();
		return;
	}

	i915_gem_lastclose(dev);

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	i915_gem_release(dev, file_priv);
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCIe.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}