1 /* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
30 * Abraham vd Merwe <abraham@2d3d.co.za>
38 #include <linux/interrupt.h> /* For task queue support */
39 #include <linux/pagemap.h> /* For FASTCALL on unlock_page() */
40 #include <linux/delay.h>
41 #include <asm/uaccess.h>
/* Ownership states for a DMA buffer, stored in the per-buffer in_use word
 * and transitioned atomically via cmpxchg() (see i830_freelist_get/put):
 * FREE = available for allocation, CLIENT = handed to userspace,
 * HARDWARE = queued on the ring for the chip to consume. */
43 #define I830_BUF_FREE 2
44 #define I830_BUF_CLIENT 1
45 #define I830_BUF_HARDWARE 0
/* Whether the buffer is currently mmap()ed into the client's address
 * space; tracked in buf_priv->currently_mapped. */
47 #define I830_BUF_UNMAPPED 0
48 #define I830_BUF_MAPPED 1
50 static drm_buf_t
*i830_freelist_get(drm_device_t
*dev
)
52 drm_device_dma_t
*dma
= dev
->dma
;
56 /* Linear search might not be the best solution */
58 for (i
= 0; i
< dma
->buf_count
; i
++) {
59 drm_buf_t
*buf
= dma
->buflist
[ i
];
60 drm_i830_buf_priv_t
*buf_priv
= buf
->dev_private
;
61 /* In use is already a pointer */
62 used
= cmpxchg(buf_priv
->in_use
, I830_BUF_FREE
,
64 if(used
== I830_BUF_FREE
) {
71 /* This should only be called if the buffer is not sent to the hardware
72 * yet, the hardware updates in use for us once its on the ring buffer.
75 static int i830_freelist_put(drm_device_t
*dev
, drm_buf_t
*buf
)
77 drm_i830_buf_priv_t
*buf_priv
= buf
->dev_private
;
80 /* In use is already a pointer */
81 used
= cmpxchg(buf_priv
->in_use
, I830_BUF_CLIENT
, I830_BUF_FREE
);
82 if(used
!= I830_BUF_CLIENT
) {
83 DRM_ERROR("Freeing buffer thats not in use : %d\n", buf
->idx
);
90 static int i830_mmap_buffers(struct file
*filp
, struct vm_area_struct
*vma
)
92 drm_file_t
*priv
= filp
->private_data
;
94 drm_i830_private_t
*dev_priv
;
96 drm_i830_buf_priv_t
*buf_priv
;
99 dev
= priv
->head
->dev
;
100 dev_priv
= dev
->dev_private
;
101 buf
= dev_priv
->mmap_buffer
;
102 buf_priv
= buf
->dev_private
;
104 vma
->vm_flags
|= (VM_IO
| VM_DONTCOPY
);
107 buf_priv
->currently_mapped
= I830_BUF_MAPPED
;
110 if (io_remap_pfn_range(vma
, vma
->vm_start
,
111 VM_OFFSET(vma
) >> PAGE_SHIFT
,
112 vma
->vm_end
- vma
->vm_start
,
113 vma
->vm_page_prot
)) return -EAGAIN
;
117 static struct file_operations i830_buffer_fops
= {
120 .release
= drm_release
,
122 .mmap
= i830_mmap_buffers
,
123 .fasync
= drm_fasync
,
126 static int i830_map_buffer(drm_buf_t
*buf
, struct file
*filp
)
128 drm_file_t
*priv
= filp
->private_data
;
129 drm_device_t
*dev
= priv
->head
->dev
;
130 drm_i830_buf_priv_t
*buf_priv
= buf
->dev_private
;
131 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
132 struct file_operations
*old_fops
;
133 unsigned long virtual;
136 if(buf_priv
->currently_mapped
== I830_BUF_MAPPED
) return -EINVAL
;
138 down_write( ¤t
->mm
->mmap_sem
);
139 old_fops
= filp
->f_op
;
140 filp
->f_op
= &i830_buffer_fops
;
141 dev_priv
->mmap_buffer
= buf
;
142 virtual = do_mmap(filp
, 0, buf
->total
, PROT_READ
|PROT_WRITE
,
143 MAP_SHARED
, buf
->bus_address
);
144 dev_priv
->mmap_buffer
= NULL
;
145 filp
->f_op
= old_fops
;
146 if (IS_ERR((void *)virtual)) { /* ugh */
148 DRM_ERROR("mmap error\n");
150 buf_priv
->virtual = NULL
;
152 buf_priv
->virtual = (void __user
*)virtual;
154 up_write( ¤t
->mm
->mmap_sem
);
159 static int i830_unmap_buffer(drm_buf_t
*buf
)
161 drm_i830_buf_priv_t
*buf_priv
= buf
->dev_private
;
164 if(buf_priv
->currently_mapped
!= I830_BUF_MAPPED
)
167 down_write(¤t
->mm
->mmap_sem
);
168 retcode
= do_munmap(current
->mm
,
169 (unsigned long)buf_priv
->virtual,
170 (size_t) buf
->total
);
171 up_write(¤t
->mm
->mmap_sem
);
173 buf_priv
->currently_mapped
= I830_BUF_UNMAPPED
;
174 buf_priv
->virtual = NULL
;
179 static int i830_dma_get_buffer(drm_device_t
*dev
, drm_i830_dma_t
*d
,
183 drm_i830_buf_priv_t
*buf_priv
;
186 buf
= i830_freelist_get(dev
);
189 DRM_DEBUG("retcode=%d\n", retcode
);
193 retcode
= i830_map_buffer(buf
, filp
);
195 i830_freelist_put(dev
, buf
);
196 DRM_ERROR("mapbuf failed, retcode %d\n", retcode
);
200 buf_priv
= buf
->dev_private
;
202 d
->request_idx
= buf
->idx
;
203 d
->request_size
= buf
->total
;
204 d
->virtual = buf_priv
->virtual;
209 static int i830_dma_cleanup(drm_device_t
*dev
)
211 drm_device_dma_t
*dma
= dev
->dma
;
213 /* Make sure interrupts are disabled here because the uninstall ioctl
214 * may not have been called from userspace and after dev_private
215 * is freed, it's too late.
217 if ( dev
->irq_enabled
) drm_irq_uninstall(dev
);
219 if (dev
->dev_private
) {
221 drm_i830_private_t
*dev_priv
=
222 (drm_i830_private_t
*) dev
->dev_private
;
224 if (dev_priv
->ring
.virtual_start
) {
225 drm_ioremapfree((void *) dev_priv
->ring
.virtual_start
,
226 dev_priv
->ring
.Size
, dev
);
228 if (dev_priv
->hw_status_page
) {
229 pci_free_consistent(dev
->pdev
, PAGE_SIZE
,
230 dev_priv
->hw_status_page
,
231 dev_priv
->dma_status_page
);
232 /* Need to rewrite hardware status page */
233 I830_WRITE(0x02080, 0x1ffff000);
236 drm_free(dev
->dev_private
, sizeof(drm_i830_private_t
),
238 dev
->dev_private
= NULL
;
240 for (i
= 0; i
< dma
->buf_count
; i
++) {
241 drm_buf_t
*buf
= dma
->buflist
[ i
];
242 drm_i830_buf_priv_t
*buf_priv
= buf
->dev_private
;
243 if ( buf_priv
->kernel_virtual
&& buf
->total
)
244 drm_ioremapfree(buf_priv
->kernel_virtual
, buf
->total
, dev
);
250 int i830_wait_ring(drm_device_t
*dev
, int n
, const char *caller
)
252 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
253 drm_i830_ring_buffer_t
*ring
= &(dev_priv
->ring
);
256 unsigned int last_head
= I830_READ(LP_RING
+ RING_HEAD
) & HEAD_ADDR
;
258 end
= jiffies
+ (HZ
*3);
259 while (ring
->space
< n
) {
260 ring
->head
= I830_READ(LP_RING
+ RING_HEAD
) & HEAD_ADDR
;
261 ring
->space
= ring
->head
- (ring
->tail
+8);
262 if (ring
->space
< 0) ring
->space
+= ring
->Size
;
264 if (ring
->head
!= last_head
) {
265 end
= jiffies
+ (HZ
*3);
266 last_head
= ring
->head
;
270 if(time_before(end
, jiffies
)) {
271 DRM_ERROR("space: %d wanted %d\n", ring
->space
, n
);
272 DRM_ERROR("lockup\n");
276 dev_priv
->sarea_priv
->perf_boxes
|= I830_BOX_WAIT
;
283 static void i830_kernel_lost_context(drm_device_t
*dev
)
285 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
286 drm_i830_ring_buffer_t
*ring
= &(dev_priv
->ring
);
288 ring
->head
= I830_READ(LP_RING
+ RING_HEAD
) & HEAD_ADDR
;
289 ring
->tail
= I830_READ(LP_RING
+ RING_TAIL
) & TAIL_ADDR
;
290 ring
->space
= ring
->head
- (ring
->tail
+8);
291 if (ring
->space
< 0) ring
->space
+= ring
->Size
;
293 if (ring
->head
== ring
->tail
)
294 dev_priv
->sarea_priv
->perf_boxes
|= I830_BOX_RING_EMPTY
;
297 static int i830_freelist_init(drm_device_t
*dev
, drm_i830_private_t
*dev_priv
)
299 drm_device_dma_t
*dma
= dev
->dma
;
301 u32
*hw_status
= (u32
*)(dev_priv
->hw_status_page
+ my_idx
);
304 if(dma
->buf_count
> 1019) {
305 /* Not enough space in the status page for the freelist */
309 for (i
= 0; i
< dma
->buf_count
; i
++) {
310 drm_buf_t
*buf
= dma
->buflist
[ i
];
311 drm_i830_buf_priv_t
*buf_priv
= buf
->dev_private
;
313 buf_priv
->in_use
= hw_status
++;
314 buf_priv
->my_use_idx
= my_idx
;
317 *buf_priv
->in_use
= I830_BUF_FREE
;
319 buf_priv
->kernel_virtual
= drm_ioremap(buf
->bus_address
,
325 static int i830_dma_initialize(drm_device_t
*dev
,
326 drm_i830_private_t
*dev_priv
,
327 drm_i830_init_t
*init
)
329 struct list_head
*list
;
331 memset(dev_priv
, 0, sizeof(drm_i830_private_t
));
333 list_for_each(list
, &dev
->maplist
->head
) {
334 drm_map_list_t
*r_list
= list_entry(list
, drm_map_list_t
, head
);
336 r_list
->map
->type
== _DRM_SHM
&&
337 r_list
->map
->flags
& _DRM_CONTAINS_LOCK
) {
338 dev_priv
->sarea_map
= r_list
->map
;
343 if(!dev_priv
->sarea_map
) {
344 dev
->dev_private
= (void *)dev_priv
;
345 i830_dma_cleanup(dev
);
346 DRM_ERROR("can not find sarea!\n");
349 dev_priv
->mmio_map
= drm_core_findmap(dev
, init
->mmio_offset
);
350 if(!dev_priv
->mmio_map
) {
351 dev
->dev_private
= (void *)dev_priv
;
352 i830_dma_cleanup(dev
);
353 DRM_ERROR("can not find mmio map!\n");
356 dev
->agp_buffer_token
= init
->buffers_offset
;
357 dev
->agp_buffer_map
= drm_core_findmap(dev
, init
->buffers_offset
);
358 if(!dev
->agp_buffer_map
) {
359 dev
->dev_private
= (void *)dev_priv
;
360 i830_dma_cleanup(dev
);
361 DRM_ERROR("can not find dma buffer map!\n");
365 dev_priv
->sarea_priv
= (drm_i830_sarea_t
*)
366 ((u8
*)dev_priv
->sarea_map
->handle
+
367 init
->sarea_priv_offset
);
369 dev_priv
->ring
.Start
= init
->ring_start
;
370 dev_priv
->ring
.End
= init
->ring_end
;
371 dev_priv
->ring
.Size
= init
->ring_size
;
373 dev_priv
->ring
.virtual_start
= drm_ioremap(dev
->agp
->base
+
375 init
->ring_size
, dev
);
377 if (dev_priv
->ring
.virtual_start
== NULL
) {
378 dev
->dev_private
= (void *) dev_priv
;
379 i830_dma_cleanup(dev
);
380 DRM_ERROR("can not ioremap virtual address for"
385 dev_priv
->ring
.tail_mask
= dev_priv
->ring
.Size
- 1;
387 dev_priv
->w
= init
->w
;
388 dev_priv
->h
= init
->h
;
389 dev_priv
->pitch
= init
->pitch
;
390 dev_priv
->back_offset
= init
->back_offset
;
391 dev_priv
->depth_offset
= init
->depth_offset
;
392 dev_priv
->front_offset
= init
->front_offset
;
394 dev_priv
->front_di1
= init
->front_offset
| init
->pitch_bits
;
395 dev_priv
->back_di1
= init
->back_offset
| init
->pitch_bits
;
396 dev_priv
->zi1
= init
->depth_offset
| init
->pitch_bits
;
398 DRM_DEBUG("front_di1 %x\n", dev_priv
->front_di1
);
399 DRM_DEBUG("back_offset %x\n", dev_priv
->back_offset
);
400 DRM_DEBUG("back_di1 %x\n", dev_priv
->back_di1
);
401 DRM_DEBUG("pitch_bits %x\n", init
->pitch_bits
);
403 dev_priv
->cpp
= init
->cpp
;
404 /* We are using separate values as placeholders for mechanisms for
405 * private backbuffer/depthbuffer usage.
408 dev_priv
->back_pitch
= init
->back_pitch
;
409 dev_priv
->depth_pitch
= init
->depth_pitch
;
410 dev_priv
->do_boxes
= 0;
411 dev_priv
->use_mi_batchbuffer_start
= 0;
413 /* Program Hardware Status Page */
414 dev_priv
->hw_status_page
=
415 pci_alloc_consistent(dev
->pdev
, PAGE_SIZE
,
416 &dev_priv
->dma_status_page
);
417 if (!dev_priv
->hw_status_page
) {
418 dev
->dev_private
= (void *)dev_priv
;
419 i830_dma_cleanup(dev
);
420 DRM_ERROR("Can not allocate hardware status page\n");
423 memset(dev_priv
->hw_status_page
, 0, PAGE_SIZE
);
424 DRM_DEBUG("hw status page @ %p\n", dev_priv
->hw_status_page
);
426 I830_WRITE(0x02080, dev_priv
->dma_status_page
);
427 DRM_DEBUG("Enabled hardware status page\n");
429 /* Now we need to init our freelist */
430 if(i830_freelist_init(dev
, dev_priv
) != 0) {
431 dev
->dev_private
= (void *)dev_priv
;
432 i830_dma_cleanup(dev
);
433 DRM_ERROR("Not enough space in the status page for"
437 dev
->dev_private
= (void *)dev_priv
;
442 static int i830_dma_init(struct inode
*inode
, struct file
*filp
,
443 unsigned int cmd
, unsigned long arg
)
445 drm_file_t
*priv
= filp
->private_data
;
446 drm_device_t
*dev
= priv
->head
->dev
;
447 drm_i830_private_t
*dev_priv
;
448 drm_i830_init_t init
;
451 if (copy_from_user(&init
, (void * __user
) arg
, sizeof(init
)))
456 dev_priv
= drm_alloc(sizeof(drm_i830_private_t
),
458 if(dev_priv
== NULL
) return -ENOMEM
;
459 retcode
= i830_dma_initialize(dev
, dev_priv
, &init
);
461 case I830_CLEANUP_DMA
:
462 retcode
= i830_dma_cleanup(dev
);
/* 2D stipple-pattern ring command, emitted by i830EmitStippleVerified()
 * below. ST1_ENABLE / ST1_MASK presumably select the enable bit and the
 * 16-bit pattern field of the following state word -- their uses are not
 * visible in this extraction, so confirm against the full source. */
472 #define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
473 #define ST1_ENABLE (1<<16)
474 #define ST1_MASK (0xffff)
476 /* Most efficient way to verify state for the i830 is as it is
477 * emitted. Non-conformant state is silently dropped.
479 static void i830EmitContextVerified( drm_device_t
*dev
,
482 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
487 BEGIN_LP_RING( I830_CTX_SETUP_SIZE
+ 4 );
489 for ( i
= 0 ; i
< I830_CTXREG_BLENDCOLR0
; i
++ ) {
491 if ((tmp
& (7<<29)) == CMD_3D
&&
492 (tmp
& (0x1f<<24)) < (0x1d<<24)) {
496 DRM_ERROR("Skipping %d\n", i
);
500 OUT_RING( STATE3D_CONST_BLEND_COLOR_CMD
);
501 OUT_RING( code
[I830_CTXREG_BLENDCOLR
] );
504 for ( i
= I830_CTXREG_VF
; i
< I830_CTXREG_MCSB0
; i
++ ) {
506 if ((tmp
& (7<<29)) == CMD_3D
&&
507 (tmp
& (0x1f<<24)) < (0x1d<<24)) {
511 DRM_ERROR("Skipping %d\n", i
);
515 OUT_RING( STATE3D_MAP_COORD_SETBIND_CMD
);
516 OUT_RING( code
[I830_CTXREG_MCSB1
] );
525 static void i830EmitTexVerified( drm_device_t
*dev
, unsigned int *code
)
527 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
532 if (code
[I830_TEXREG_MI0
] == GFX_OP_MAP_INFO
||
533 (code
[I830_TEXREG_MI0
] & ~(0xf*LOAD_TEXTURE_MAP0
)) ==
534 (STATE3D_LOAD_STATE_IMMEDIATE_2
|4)) {
536 BEGIN_LP_RING( I830_TEX_SETUP_SIZE
);
538 OUT_RING( code
[I830_TEXREG_MI0
] ); /* TM0LI */
539 OUT_RING( code
[I830_TEXREG_MI1
] ); /* TM0S0 */
540 OUT_RING( code
[I830_TEXREG_MI2
] ); /* TM0S1 */
541 OUT_RING( code
[I830_TEXREG_MI3
] ); /* TM0S2 */
542 OUT_RING( code
[I830_TEXREG_MI4
] ); /* TM0S3 */
543 OUT_RING( code
[I830_TEXREG_MI5
] ); /* TM0S4 */
545 for ( i
= 6 ; i
< I830_TEX_SETUP_SIZE
; i
++ ) {
557 printk("rejected packet %x\n", code
[0]);
560 static void i830EmitTexBlendVerified( drm_device_t
*dev
,
564 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
572 BEGIN_LP_RING( num
+ 1 );
574 for ( i
= 0 ; i
< num
; i
++ ) {
586 static void i830EmitTexPalette( drm_device_t
*dev
,
587 unsigned int *palette
,
591 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
597 BEGIN_LP_RING( 258 );
600 OUT_RING(CMD_OP_MAP_PALETTE_LOAD
|
604 OUT_RING(CMD_OP_MAP_PALETTE_LOAD
| MAP_PALETTE_NUM(number
));
606 for(i
= 0; i
< 256; i
++) {
607 OUT_RING(palette
[i
]);
610 /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop!
614 /* Need to do some additional checking when setting the dest buffer.
616 static void i830EmitDestVerified( drm_device_t
*dev
,
619 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
623 BEGIN_LP_RING( I830_DEST_SETUP_SIZE
+ 10 );
626 tmp
= code
[I830_DESTREG_CBUFADDR
];
627 if (tmp
== dev_priv
->front_di1
|| tmp
== dev_priv
->back_di1
) {
628 if (((int)outring
) & 8) {
633 OUT_RING( CMD_OP_DESTBUFFER_INFO
);
634 OUT_RING( BUF_3D_ID_COLOR_BACK
|
635 BUF_3D_PITCH(dev_priv
->back_pitch
* dev_priv
->cpp
) |
640 OUT_RING( CMD_OP_DESTBUFFER_INFO
);
641 OUT_RING( BUF_3D_ID_DEPTH
| BUF_3D_USE_FENCE
|
642 BUF_3D_PITCH(dev_priv
->depth_pitch
* dev_priv
->cpp
));
643 OUT_RING( dev_priv
->zi1
);
646 DRM_ERROR("bad di1 %x (allow %x or %x)\n",
647 tmp
, dev_priv
->front_di1
, dev_priv
->back_di1
);
654 OUT_RING( GFX_OP_DESTBUFFER_VARS
);
655 OUT_RING( code
[I830_DESTREG_DV1
] );
657 OUT_RING( GFX_OP_DRAWRECT_INFO
);
658 OUT_RING( code
[I830_DESTREG_DR1
] );
659 OUT_RING( code
[I830_DESTREG_DR2
] );
660 OUT_RING( code
[I830_DESTREG_DR3
] );
661 OUT_RING( code
[I830_DESTREG_DR4
] );
663 /* Need to verify this */
664 tmp
= code
[I830_DESTREG_SENABLE
];
665 if((tmp
& ~0x3) == GFX_OP_SCISSOR_ENABLE
) {
668 DRM_ERROR("bad scissor enable\n");
672 OUT_RING( GFX_OP_SCISSOR_RECT
);
673 OUT_RING( code
[I830_DESTREG_SR1
] );
674 OUT_RING( code
[I830_DESTREG_SR2
] );
680 static void i830EmitStippleVerified( drm_device_t
*dev
,
683 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
687 OUT_RING( GFX_OP_STIPPLE
);
693 static void i830EmitState( drm_device_t
*dev
)
695 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
696 drm_i830_sarea_t
*sarea_priv
= dev_priv
->sarea_priv
;
697 unsigned int dirty
= sarea_priv
->dirty
;
699 DRM_DEBUG("%s %x\n", __FUNCTION__
, dirty
);
701 if (dirty
& I830_UPLOAD_BUFFERS
) {
702 i830EmitDestVerified( dev
, sarea_priv
->BufferState
);
703 sarea_priv
->dirty
&= ~I830_UPLOAD_BUFFERS
;
706 if (dirty
& I830_UPLOAD_CTX
) {
707 i830EmitContextVerified( dev
, sarea_priv
->ContextState
);
708 sarea_priv
->dirty
&= ~I830_UPLOAD_CTX
;
711 if (dirty
& I830_UPLOAD_TEX0
) {
712 i830EmitTexVerified( dev
, sarea_priv
->TexState
[0] );
713 sarea_priv
->dirty
&= ~I830_UPLOAD_TEX0
;
716 if (dirty
& I830_UPLOAD_TEX1
) {
717 i830EmitTexVerified( dev
, sarea_priv
->TexState
[1] );
718 sarea_priv
->dirty
&= ~I830_UPLOAD_TEX1
;
721 if (dirty
& I830_UPLOAD_TEXBLEND0
) {
722 i830EmitTexBlendVerified( dev
, sarea_priv
->TexBlendState
[0],
723 sarea_priv
->TexBlendStateWordsUsed
[0]);
724 sarea_priv
->dirty
&= ~I830_UPLOAD_TEXBLEND0
;
727 if (dirty
& I830_UPLOAD_TEXBLEND1
) {
728 i830EmitTexBlendVerified( dev
, sarea_priv
->TexBlendState
[1],
729 sarea_priv
->TexBlendStateWordsUsed
[1]);
730 sarea_priv
->dirty
&= ~I830_UPLOAD_TEXBLEND1
;
733 if (dirty
& I830_UPLOAD_TEX_PALETTE_SHARED
) {
734 i830EmitTexPalette(dev
, sarea_priv
->Palette
[0], 0, 1);
736 if (dirty
& I830_UPLOAD_TEX_PALETTE_N(0)) {
737 i830EmitTexPalette(dev
, sarea_priv
->Palette
[0], 0, 0);
738 sarea_priv
->dirty
&= ~I830_UPLOAD_TEX_PALETTE_N(0);
740 if (dirty
& I830_UPLOAD_TEX_PALETTE_N(1)) {
741 i830EmitTexPalette(dev
, sarea_priv
->Palette
[1], 1, 0);
742 sarea_priv
->dirty
&= ~I830_UPLOAD_TEX_PALETTE_N(1);
748 if (dirty
& I830_UPLOAD_TEX_PALETTE_N(2)) {
749 i830EmitTexPalette(dev
, sarea_priv
->Palette2
[0], 0, 0);
750 sarea_priv
->dirty
&= ~I830_UPLOAD_TEX_PALETTE_N(2);
752 if (dirty
& I830_UPLOAD_TEX_PALETTE_N(3)) {
753 i830EmitTexPalette(dev
, sarea_priv
->Palette2
[1], 1, 0);
754 sarea_priv
->dirty
&= ~I830_UPLOAD_TEX_PALETTE_N(2);
761 if (dirty
& I830_UPLOAD_STIPPLE
) {
762 i830EmitStippleVerified( dev
,
763 sarea_priv
->StippleState
);
764 sarea_priv
->dirty
&= ~I830_UPLOAD_STIPPLE
;
767 if (dirty
& I830_UPLOAD_TEX2
) {
768 i830EmitTexVerified( dev
, sarea_priv
->TexState2
);
769 sarea_priv
->dirty
&= ~I830_UPLOAD_TEX2
;
772 if (dirty
& I830_UPLOAD_TEX3
) {
773 i830EmitTexVerified( dev
, sarea_priv
->TexState3
);
774 sarea_priv
->dirty
&= ~I830_UPLOAD_TEX3
;
778 if (dirty
& I830_UPLOAD_TEXBLEND2
) {
779 i830EmitTexBlendVerified(
781 sarea_priv
->TexBlendState2
,
782 sarea_priv
->TexBlendStateWordsUsed2
);
784 sarea_priv
->dirty
&= ~I830_UPLOAD_TEXBLEND2
;
787 if (dirty
& I830_UPLOAD_TEXBLEND3
) {
788 i830EmitTexBlendVerified(
790 sarea_priv
->TexBlendState3
,
791 sarea_priv
->TexBlendStateWordsUsed3
);
792 sarea_priv
->dirty
&= ~I830_UPLOAD_TEXBLEND3
;
796 /* ================================================================
797 * Performance monitoring functions
800 static void i830_fill_box( drm_device_t
*dev
,
801 int x
, int y
, int w
, int h
,
802 int r
, int g
, int b
)
804 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
806 unsigned int BR13
, CMD
;
809 BR13
= (0xF0 << 16) | (dev_priv
->pitch
* dev_priv
->cpp
) | (1<<24);
810 CMD
= XY_COLOR_BLT_CMD
;
811 x
+= dev_priv
->sarea_priv
->boxes
[0].x1
;
812 y
+= dev_priv
->sarea_priv
->boxes
[0].y1
;
814 if (dev_priv
->cpp
== 4) {
816 CMD
|= (XY_COLOR_BLT_WRITE_ALPHA
| XY_COLOR_BLT_WRITE_RGB
);
817 color
= (((0xff) << 24) | (r
<< 16) | (g
<< 8) | b
);
819 color
= (((r
& 0xf8) << 8) |
827 OUT_RING( (y
<< 16) | x
);
828 OUT_RING( ((y
+h
) << 16) | (x
+w
) );
830 if ( dev_priv
->current_page
== 1 ) {
831 OUT_RING( dev_priv
->front_offset
);
833 OUT_RING( dev_priv
->back_offset
);
840 static void i830_cp_performance_boxes( drm_device_t
*dev
)
842 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
844 /* Purple box for page flipping
846 if ( dev_priv
->sarea_priv
->perf_boxes
& I830_BOX_FLIP
)
847 i830_fill_box( dev
, 4, 4, 8, 8, 255, 0, 255 );
849 /* Red box if we have to wait for idle at any point
851 if ( dev_priv
->sarea_priv
->perf_boxes
& I830_BOX_WAIT
)
852 i830_fill_box( dev
, 16, 4, 8, 8, 255, 0, 0 );
854 /* Blue box: lost context?
856 if ( dev_priv
->sarea_priv
->perf_boxes
& I830_BOX_LOST_CONTEXT
)
857 i830_fill_box( dev
, 28, 4, 8, 8, 0, 0, 255 );
859 /* Yellow box for texture swaps
861 if ( dev_priv
->sarea_priv
->perf_boxes
& I830_BOX_TEXTURE_LOAD
)
862 i830_fill_box( dev
, 40, 4, 8, 8, 255, 255, 0 );
864 /* Green box if hardware never idles (as far as we can tell)
866 if ( !(dev_priv
->sarea_priv
->perf_boxes
& I830_BOX_RING_EMPTY
) )
867 i830_fill_box( dev
, 64, 4, 8, 8, 0, 255, 0 );
870 /* Draw bars indicating number of buffers allocated
871 * (not a great measure, easily confused)
873 if (dev_priv
->dma_used
) {
874 int bar
= dev_priv
->dma_used
/ 10240;
875 if (bar
> 100) bar
= 100;
876 if (bar
< 1) bar
= 1;
877 i830_fill_box( dev
, 4, 16, bar
, 4, 196, 128, 128 );
878 dev_priv
->dma_used
= 0;
881 dev_priv
->sarea_priv
->perf_boxes
= 0;
884 static void i830_dma_dispatch_clear( drm_device_t
*dev
, int flags
,
885 unsigned int clear_color
,
886 unsigned int clear_zval
,
887 unsigned int clear_depthmask
)
889 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
890 drm_i830_sarea_t
*sarea_priv
= dev_priv
->sarea_priv
;
891 int nbox
= sarea_priv
->nbox
;
892 drm_clip_rect_t
*pbox
= sarea_priv
->boxes
;
893 int pitch
= dev_priv
->pitch
;
894 int cpp
= dev_priv
->cpp
;
896 unsigned int BR13
, CMD
, D_CMD
;
900 if ( dev_priv
->current_page
== 1 ) {
901 unsigned int tmp
= flags
;
903 flags
&= ~(I830_FRONT
| I830_BACK
);
904 if ( tmp
& I830_FRONT
) flags
|= I830_BACK
;
905 if ( tmp
& I830_BACK
) flags
|= I830_FRONT
;
908 i830_kernel_lost_context(dev
);
912 BR13
= (0xF0 << 16) | (pitch
* cpp
) | (1<<24);
913 D_CMD
= CMD
= XY_COLOR_BLT_CMD
;
916 BR13
= (0xF0 << 16) | (pitch
* cpp
) | (1<<24) | (1<<25);
917 CMD
= (XY_COLOR_BLT_CMD
| XY_COLOR_BLT_WRITE_ALPHA
|
918 XY_COLOR_BLT_WRITE_RGB
);
919 D_CMD
= XY_COLOR_BLT_CMD
;
920 if(clear_depthmask
& 0x00ffffff)
921 D_CMD
|= XY_COLOR_BLT_WRITE_RGB
;
922 if(clear_depthmask
& 0xff000000)
923 D_CMD
|= XY_COLOR_BLT_WRITE_ALPHA
;
926 BR13
= (0xF0 << 16) | (pitch
* cpp
) | (1<<24);
927 D_CMD
= CMD
= XY_COLOR_BLT_CMD
;
931 if (nbox
> I830_NR_SAREA_CLIPRECTS
)
932 nbox
= I830_NR_SAREA_CLIPRECTS
;
934 for (i
= 0 ; i
< nbox
; i
++, pbox
++) {
935 if (pbox
->x1
> pbox
->x2
||
936 pbox
->y1
> pbox
->y2
||
937 pbox
->x2
> dev_priv
->w
||
938 pbox
->y2
> dev_priv
->h
)
941 if ( flags
& I830_FRONT
) {
942 DRM_DEBUG("clear front\n");
946 OUT_RING( (pbox
->y1
<< 16) | pbox
->x1
);
947 OUT_RING( (pbox
->y2
<< 16) | pbox
->x2
);
948 OUT_RING( dev_priv
->front_offset
);
949 OUT_RING( clear_color
);
953 if ( flags
& I830_BACK
) {
954 DRM_DEBUG("clear back\n");
958 OUT_RING( (pbox
->y1
<< 16) | pbox
->x1
);
959 OUT_RING( (pbox
->y2
<< 16) | pbox
->x2
);
960 OUT_RING( dev_priv
->back_offset
);
961 OUT_RING( clear_color
);
965 if ( flags
& I830_DEPTH
) {
966 DRM_DEBUG("clear depth\n");
970 OUT_RING( (pbox
->y1
<< 16) | pbox
->x1
);
971 OUT_RING( (pbox
->y2
<< 16) | pbox
->x2
);
972 OUT_RING( dev_priv
->depth_offset
);
973 OUT_RING( clear_zval
);
979 static void i830_dma_dispatch_swap( drm_device_t
*dev
)
981 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
982 drm_i830_sarea_t
*sarea_priv
= dev_priv
->sarea_priv
;
983 int nbox
= sarea_priv
->nbox
;
984 drm_clip_rect_t
*pbox
= sarea_priv
->boxes
;
985 int pitch
= dev_priv
->pitch
;
986 int cpp
= dev_priv
->cpp
;
988 unsigned int CMD
, BR13
;
991 DRM_DEBUG("swapbuffers\n");
993 i830_kernel_lost_context(dev
);
995 if (dev_priv
->do_boxes
)
996 i830_cp_performance_boxes( dev
);
1000 BR13
= (pitch
* cpp
) | (0xCC << 16) | (1<<24);
1001 CMD
= XY_SRC_COPY_BLT_CMD
;
1004 BR13
= (pitch
* cpp
) | (0xCC << 16) | (1<<24) | (1<<25);
1005 CMD
= (XY_SRC_COPY_BLT_CMD
| XY_SRC_COPY_BLT_WRITE_ALPHA
|
1006 XY_SRC_COPY_BLT_WRITE_RGB
);
1009 BR13
= (pitch
* cpp
) | (0xCC << 16) | (1<<24);
1010 CMD
= XY_SRC_COPY_BLT_CMD
;
1015 if (nbox
> I830_NR_SAREA_CLIPRECTS
)
1016 nbox
= I830_NR_SAREA_CLIPRECTS
;
1018 for (i
= 0 ; i
< nbox
; i
++, pbox
++)
1020 if (pbox
->x1
> pbox
->x2
||
1021 pbox
->y1
> pbox
->y2
||
1022 pbox
->x2
> dev_priv
->w
||
1023 pbox
->y2
> dev_priv
->h
)
1026 DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
1028 pbox
->x2
, pbox
->y2
);
1033 OUT_RING( (pbox
->y1
<< 16) | pbox
->x1
);
1034 OUT_RING( (pbox
->y2
<< 16) | pbox
->x2
);
1036 if (dev_priv
->current_page
== 0)
1037 OUT_RING( dev_priv
->front_offset
);
1039 OUT_RING( dev_priv
->back_offset
);
1041 OUT_RING( (pbox
->y1
<< 16) | pbox
->x1
);
1042 OUT_RING( BR13
& 0xffff );
1044 if (dev_priv
->current_page
== 0)
1045 OUT_RING( dev_priv
->back_offset
);
1047 OUT_RING( dev_priv
->front_offset
);
1053 static void i830_dma_dispatch_flip( drm_device_t
*dev
)
1055 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
1058 DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n",
1060 dev_priv
->current_page
,
1061 dev_priv
->sarea_priv
->pf_current_page
);
1063 i830_kernel_lost_context(dev
);
1065 if (dev_priv
->do_boxes
) {
1066 dev_priv
->sarea_priv
->perf_boxes
|= I830_BOX_FLIP
;
1067 i830_cp_performance_boxes( dev
);
1072 OUT_RING( INST_PARSER_CLIENT
| INST_OP_FLUSH
| INST_FLUSH_MAP_CACHE
);
1077 OUT_RING( CMD_OP_DISPLAYBUFFER_INFO
| ASYNC_FLIP
);
1079 if ( dev_priv
->current_page
== 0 ) {
1080 OUT_RING( dev_priv
->back_offset
);
1081 dev_priv
->current_page
= 1;
1083 OUT_RING( dev_priv
->front_offset
);
1084 dev_priv
->current_page
= 0;
1091 OUT_RING( MI_WAIT_FOR_EVENT
|
1092 MI_WAIT_FOR_PLANE_A_FLIP
);
1097 dev_priv
->sarea_priv
->pf_current_page
= dev_priv
->current_page
;
1100 static void i830_dma_dispatch_vertex(drm_device_t
*dev
,
1105 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
1106 drm_i830_buf_priv_t
*buf_priv
= buf
->dev_private
;
1107 drm_i830_sarea_t
*sarea_priv
= dev_priv
->sarea_priv
;
1108 drm_clip_rect_t
*box
= sarea_priv
->boxes
;
1109 int nbox
= sarea_priv
->nbox
;
1110 unsigned long address
= (unsigned long)buf
->bus_address
;
1111 unsigned long start
= address
- dev
->agp
->base
;
1115 i830_kernel_lost_context(dev
);
1117 if (nbox
> I830_NR_SAREA_CLIPRECTS
)
1118 nbox
= I830_NR_SAREA_CLIPRECTS
;
1121 u
= cmpxchg(buf_priv
->in_use
, I830_BUF_CLIENT
,
1123 if(u
!= I830_BUF_CLIENT
) {
1124 DRM_DEBUG("xxxx 2\n");
1131 if (sarea_priv
->dirty
)
1132 i830EmitState( dev
);
1134 DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
1135 address
, used
, nbox
);
1137 dev_priv
->counter
++;
1138 DRM_DEBUG( "dispatch counter : %ld\n", dev_priv
->counter
);
1139 DRM_DEBUG( "i830_dma_dispatch\n");
1140 DRM_DEBUG( "start : %lx\n", start
);
1141 DRM_DEBUG( "used : %d\n", used
);
1142 DRM_DEBUG( "start + used - 4 : %ld\n", start
+ used
- 4);
1144 if (buf_priv
->currently_mapped
== I830_BUF_MAPPED
) {
1145 u32
*vp
= buf_priv
->kernel_virtual
;
1147 vp
[0] = (GFX_OP_PRIMITIVE
|
1148 sarea_priv
->vertex_prim
|
1151 if (dev_priv
->use_mi_batchbuffer_start
) {
1152 vp
[used
/4] = MI_BATCH_BUFFER_END
;
1161 i830_unmap_buffer(buf
);
1168 OUT_RING( GFX_OP_DRAWRECT_INFO
);
1169 OUT_RING( sarea_priv
->BufferState
[I830_DESTREG_DR1
] );
1170 OUT_RING( box
[i
].x1
| (box
[i
].y1
<<16) );
1171 OUT_RING( box
[i
].x2
| (box
[i
].y2
<<16) );
1172 OUT_RING( sarea_priv
->BufferState
[I830_DESTREG_DR4
] );
1177 if (dev_priv
->use_mi_batchbuffer_start
) {
1179 OUT_RING( MI_BATCH_BUFFER_START
| (2<<6) );
1180 OUT_RING( start
| MI_BATCH_NON_SECURE
);
1185 OUT_RING( MI_BATCH_BUFFER
);
1186 OUT_RING( start
| MI_BATCH_NON_SECURE
);
1187 OUT_RING( start
+ used
- 4 );
1192 } while (++i
< nbox
);
1196 dev_priv
->counter
++;
1198 (void) cmpxchg(buf_priv
->in_use
, I830_BUF_CLIENT
,
1202 OUT_RING( CMD_STORE_DWORD_IDX
);
1204 OUT_RING( dev_priv
->counter
);
1205 OUT_RING( CMD_STORE_DWORD_IDX
);
1206 OUT_RING( buf_priv
->my_use_idx
);
1207 OUT_RING( I830_BUF_FREE
);
1208 OUT_RING( CMD_REPORT_HEAD
);
1215 static void i830_dma_quiescent(drm_device_t
*dev
)
1217 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
1220 i830_kernel_lost_context(dev
);
1223 OUT_RING( INST_PARSER_CLIENT
| INST_OP_FLUSH
| INST_FLUSH_MAP_CACHE
);
1224 OUT_RING( CMD_REPORT_HEAD
);
1229 i830_wait_ring( dev
, dev_priv
->ring
.Size
- 8, __FUNCTION__
);
1232 static int i830_flush_queue(drm_device_t
*dev
)
1234 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
1235 drm_device_dma_t
*dma
= dev
->dma
;
1239 i830_kernel_lost_context(dev
);
1242 OUT_RING( CMD_REPORT_HEAD
);
1246 i830_wait_ring( dev
, dev_priv
->ring
.Size
- 8, __FUNCTION__
);
1248 for (i
= 0; i
< dma
->buf_count
; i
++) {
1249 drm_buf_t
*buf
= dma
->buflist
[ i
];
1250 drm_i830_buf_priv_t
*buf_priv
= buf
->dev_private
;
1252 int used
= cmpxchg(buf_priv
->in_use
, I830_BUF_HARDWARE
,
1255 if (used
== I830_BUF_HARDWARE
)
1256 DRM_DEBUG("reclaimed from HARDWARE\n");
1257 if (used
== I830_BUF_CLIENT
)
1258 DRM_DEBUG("still on client\n");
1264 /* Must be called with the lock held */
1265 void i830_reclaim_buffers(drm_device_t
*dev
, struct file
*filp
)
1267 drm_device_dma_t
*dma
= dev
->dma
;
1271 if (!dev
->dev_private
) return;
1272 if (!dma
->buflist
) return;
1274 i830_flush_queue(dev
);
1276 for (i
= 0; i
< dma
->buf_count
; i
++) {
1277 drm_buf_t
*buf
= dma
->buflist
[ i
];
1278 drm_i830_buf_priv_t
*buf_priv
= buf
->dev_private
;
1280 if (buf
->filp
== filp
&& buf_priv
) {
1281 int used
= cmpxchg(buf_priv
->in_use
, I830_BUF_CLIENT
,
1284 if (used
== I830_BUF_CLIENT
)
1285 DRM_DEBUG("reclaimed from client\n");
1286 if(buf_priv
->currently_mapped
== I830_BUF_MAPPED
)
1287 buf_priv
->currently_mapped
= I830_BUF_UNMAPPED
;
1292 static int i830_flush_ioctl(struct inode
*inode
, struct file
*filp
,
1293 unsigned int cmd
, unsigned long arg
)
1295 drm_file_t
*priv
= filp
->private_data
;
1296 drm_device_t
*dev
= priv
->head
->dev
;
1298 LOCK_TEST_WITH_RETURN(dev
, filp
);
1300 i830_flush_queue(dev
);
1304 static int i830_dma_vertex(struct inode
*inode
, struct file
*filp
,
1305 unsigned int cmd
, unsigned long arg
)
1307 drm_file_t
*priv
= filp
->private_data
;
1308 drm_device_t
*dev
= priv
->head
->dev
;
1309 drm_device_dma_t
*dma
= dev
->dma
;
1310 drm_i830_private_t
*dev_priv
= (drm_i830_private_t
*)dev
->dev_private
;
1311 u32
*hw_status
= dev_priv
->hw_status_page
;
1312 drm_i830_sarea_t
*sarea_priv
= (drm_i830_sarea_t
*)
1313 dev_priv
->sarea_priv
;
1314 drm_i830_vertex_t vertex
;
1316 if (copy_from_user(&vertex
, (drm_i830_vertex_t __user
*)arg
, sizeof(vertex
)))
1319 LOCK_TEST_WITH_RETURN(dev
, filp
);
1321 DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
1322 vertex
.idx
, vertex
.used
, vertex
.discard
);
1324 if(vertex
.idx
< 0 || vertex
.idx
> dma
->buf_count
) return -EINVAL
;
1326 i830_dma_dispatch_vertex( dev
,
1327 dma
->buflist
[ vertex
.idx
],
1328 vertex
.discard
, vertex
.used
);
1330 sarea_priv
->last_enqueue
= dev_priv
->counter
-1;
1331 sarea_priv
->last_dispatch
= (int) hw_status
[5];
1336 static int i830_clear_bufs(struct inode
*inode
, struct file
*filp
,
1337 unsigned int cmd
, unsigned long arg
)
1339 drm_file_t
*priv
= filp
->private_data
;
1340 drm_device_t
*dev
= priv
->head
->dev
;
1341 drm_i830_clear_t clear
;
1343 if (copy_from_user(&clear
, (drm_i830_clear_t __user
*)arg
, sizeof(clear
)))
1346 LOCK_TEST_WITH_RETURN(dev
, filp
);
1348 /* GH: Someone's doing nasty things... */
1349 if (!dev
->dev_private
) {
1353 i830_dma_dispatch_clear( dev
, clear
.flags
,
1356 clear
.clear_depthmask
);
1360 static int i830_swap_bufs(struct inode
*inode
, struct file
*filp
,
1361 unsigned int cmd
, unsigned long arg
)
1363 drm_file_t
*priv
= filp
->private_data
;
1364 drm_device_t
*dev
= priv
->head
->dev
;
1366 DRM_DEBUG("i830_swap_bufs\n");
1368 LOCK_TEST_WITH_RETURN(dev
, filp
);
1370 i830_dma_dispatch_swap( dev
);
1376 /* Not sure why this isn't set all the time:
1378 static void i830_do_init_pageflip( drm_device_t
*dev
)
1380 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
1382 DRM_DEBUG("%s\n", __FUNCTION__
);
1383 dev_priv
->page_flipping
= 1;
1384 dev_priv
->current_page
= 0;
1385 dev_priv
->sarea_priv
->pf_current_page
= dev_priv
->current_page
;
1388 static int i830_do_cleanup_pageflip( drm_device_t
*dev
)
1390 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
1392 DRM_DEBUG("%s\n", __FUNCTION__
);
1393 if (dev_priv
->current_page
!= 0)
1394 i830_dma_dispatch_flip( dev
);
1396 dev_priv
->page_flipping
= 0;
1400 static int i830_flip_bufs(struct inode
*inode
, struct file
*filp
,
1401 unsigned int cmd
, unsigned long arg
)
1403 drm_file_t
*priv
= filp
->private_data
;
1404 drm_device_t
*dev
= priv
->head
->dev
;
1405 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
1407 DRM_DEBUG("%s\n", __FUNCTION__
);
1409 LOCK_TEST_WITH_RETURN(dev
, filp
);
1411 if (!dev_priv
->page_flipping
)
1412 i830_do_init_pageflip( dev
);
1414 i830_dma_dispatch_flip( dev
);
1418 static int i830_getage(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
1421 drm_file_t
*priv
= filp
->private_data
;
1422 drm_device_t
*dev
= priv
->head
->dev
;
1423 drm_i830_private_t
*dev_priv
= (drm_i830_private_t
*)dev
->dev_private
;
1424 u32
*hw_status
= dev_priv
->hw_status_page
;
1425 drm_i830_sarea_t
*sarea_priv
= (drm_i830_sarea_t
*)
1426 dev_priv
->sarea_priv
;
1428 sarea_priv
->last_dispatch
= (int) hw_status
[5];
1432 static int i830_getbuf(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
1435 drm_file_t
*priv
= filp
->private_data
;
1436 drm_device_t
*dev
= priv
->head
->dev
;
1439 drm_i830_private_t
*dev_priv
= (drm_i830_private_t
*)dev
->dev_private
;
1440 u32
*hw_status
= dev_priv
->hw_status_page
;
1441 drm_i830_sarea_t
*sarea_priv
= (drm_i830_sarea_t
*)
1442 dev_priv
->sarea_priv
;
1444 DRM_DEBUG("getbuf\n");
1445 if (copy_from_user(&d
, (drm_i830_dma_t __user
*)arg
, sizeof(d
)))
1448 LOCK_TEST_WITH_RETURN(dev
, filp
);
1452 retcode
= i830_dma_get_buffer(dev
, &d
, filp
);
1454 DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
1455 current
->pid
, retcode
, d
.granted
);
1457 if (copy_to_user((drm_dma_t __user
*)arg
, &d
, sizeof(d
)))
1459 sarea_priv
->last_dispatch
= (int) hw_status
[5];
/* Ioctl stub: buffer copying is not needed on 2.4.x-era kernels,
 * so this is intentionally a no-op that reports success.
 */
static int i830_copybuf(struct inode *inode,
			struct file *filp, unsigned int cmd, unsigned long arg)
{
	/* Never copy - 2.4.x doesn't need it */
	return 0;
}
/* Ioctl stub: companion to i830_copybuf; intentionally a no-op. */
static int i830_docopy(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg)
{
	return 0;
}
1479 static int i830_getparam( struct inode
*inode
, struct file
*filp
,
1480 unsigned int cmd
, unsigned long arg
)
1482 drm_file_t
*priv
= filp
->private_data
;
1483 drm_device_t
*dev
= priv
->head
->dev
;
1484 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
1485 drm_i830_getparam_t param
;
1489 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__
);
1493 if (copy_from_user(¶m
, (drm_i830_getparam_t __user
*)arg
, sizeof(param
) ))
1496 switch( param
.param
) {
1497 case I830_PARAM_IRQ_ACTIVE
:
1498 value
= dev
->irq_enabled
;
1504 if ( copy_to_user( param
.value
, &value
, sizeof(int) ) ) {
1505 DRM_ERROR( "copy_to_user\n" );
1513 static int i830_setparam( struct inode
*inode
, struct file
*filp
,
1514 unsigned int cmd
, unsigned long arg
)
1516 drm_file_t
*priv
= filp
->private_data
;
1517 drm_device_t
*dev
= priv
->head
->dev
;
1518 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
1519 drm_i830_setparam_t param
;
1522 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__
);
1526 if (copy_from_user(¶m
, (drm_i830_setparam_t __user
*)arg
, sizeof(param
) ))
1529 switch( param
.param
) {
1530 case I830_SETPARAM_USE_MI_BATCHBUFFER_START
:
1531 dev_priv
->use_mi_batchbuffer_start
= param
.value
;
1541 void i830_driver_pretakedown(drm_device_t
*dev
)
1543 i830_dma_cleanup( dev
);
1546 void i830_driver_prerelease(drm_device_t
*dev
, DRMFILE filp
)
1548 if (dev
->dev_private
) {
1549 drm_i830_private_t
*dev_priv
= dev
->dev_private
;
1550 if (dev_priv
->page_flipping
) {
1551 i830_do_cleanup_pageflip(dev
);
1556 void i830_driver_release(drm_device_t
*dev
, struct file
*filp
)
1558 i830_reclaim_buffers(dev
, filp
);
1561 int i830_driver_dma_quiescent(drm_device_t
*dev
)
1563 i830_dma_quiescent( dev
);
1567 drm_ioctl_desc_t i830_ioctls
[] = {
1568 [DRM_IOCTL_NR(DRM_I830_INIT
)] = { i830_dma_init
, 1, 1 },
1569 [DRM_IOCTL_NR(DRM_I830_VERTEX
)] = { i830_dma_vertex
, 1, 0 },
1570 [DRM_IOCTL_NR(DRM_I830_CLEAR
)] = { i830_clear_bufs
, 1, 0 },
1571 [DRM_IOCTL_NR(DRM_I830_FLUSH
)] = { i830_flush_ioctl
, 1, 0 },
1572 [DRM_IOCTL_NR(DRM_I830_GETAGE
)] = { i830_getage
, 1, 0 },
1573 [DRM_IOCTL_NR(DRM_I830_GETBUF
)] = { i830_getbuf
, 1, 0 },
1574 [DRM_IOCTL_NR(DRM_I830_SWAP
)] = { i830_swap_bufs
, 1, 0 },
1575 [DRM_IOCTL_NR(DRM_I830_COPY
)] = { i830_copybuf
, 1, 0 },
1576 [DRM_IOCTL_NR(DRM_I830_DOCOPY
)] = { i830_docopy
, 1, 0 },
1577 [DRM_IOCTL_NR(DRM_I830_FLIP
)] = { i830_flip_bufs
, 1, 0 },
1578 [DRM_IOCTL_NR(DRM_I830_IRQ_EMIT
)] = { i830_irq_emit
, 1, 0 },
1579 [DRM_IOCTL_NR(DRM_I830_IRQ_WAIT
)] = { i830_irq_wait
, 1, 0 },
1580 [DRM_IOCTL_NR(DRM_I830_GETPARAM
)] = { i830_getparam
, 1, 0 },
1581 [DRM_IOCTL_NR(DRM_I830_SETPARAM
)] = { i830_setparam
, 1, 0 }
1584 int i830_max_ioctl
= DRM_ARRAY_SIZE(i830_ioctls
);
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI devices.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate that every i8xx is AGP.
1597 int i830_driver_device_is_agp(drm_device_t
* dev
)