/**
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include "drmP.h"
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
        return pci_resource_start(dev->pdev, resource);
}

EXPORT_SYMBOL(drm_get_resource_start);
unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
        return pci_resource_len(dev->pdev, resource);
}

EXPORT_SYMBOL(drm_get_resource_len);
static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
                                             drm_local_map_t *map)
{
        struct list_head *list;

        list_for_each(list, &dev->maplist->head) {
                drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
                if (entry->map && map->type == entry->map->type &&
                    entry->map->offset == map->offset) {
                        return entry;
                }
        }

        return NULL;
}
static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
                          unsigned long user_token, int hashed_handle)
{
        int use_hashed_handle;

#if (BITS_PER_LONG == 64)
        use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
        use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

        if (use_hashed_handle) {
                return drm_ht_just_insert_please(&dev->map_hash, hash,
                                                 user_token, 32 - PAGE_SHIFT - 3,
                                                 PAGE_SHIFT, DRM_MAP_HASH_OFFSET);
        } else {
                hash->key = user_token;
                return drm_ht_insert_item(&dev->map_hash, hash);
        }
}
/**
 * Ioctl to specify a range of memory that is available for mapping by a non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
                           unsigned int size, drm_map_type_t type,
                           drm_map_flags_t flags, drm_map_list_t ** maplist)
{
        drm_map_t *map;
        drm_map_list_t *list;
        drm_dma_handle_t *dmah;
        unsigned long user_token;
        int ret;

        map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
        if (!map)
                return -ENOMEM;

        map->offset = offset;
        map->size = size;
        map->flags = flags;
        map->type = type;

        /* Only allow shared memory to be removable since we only keep enough
         * book keeping information about shared memory to allow for removal
         * when processes fork.
         */
        if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                  map->offset, map->size, map->type);
        if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        map->mtrr = -1;
        map->handle = NULL;

        switch (map->type) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
                if (map->offset + (map->size - 1) < map->offset ||
                    map->offset < virt_to_phys(high_memory)) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                /* Some drivers preinitialize some maps, without the X Server
                 * needing to be aware of it.  Therefore, we just return success
                 * when the server tries to create a duplicate map.
                 */
                list = drm_find_matching_map(dev, map);
                if (list != NULL) {
                        if (list->map->size != map->size) {
                                DRM_DEBUG("Matching maps of type %d with "
                                          "mismatched sizes, (%ld vs %ld)\n",
                                          map->type, map->size,
                                          list->map->size);
                                list->map->size = map->size;
                        }

                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        *maplist = list;
                        return 0;
                }

                if (drm_core_has_MTRR(dev)) {
                        if (map->type == _DRM_FRAME_BUFFER ||
                            (map->flags & _DRM_WRITE_COMBINING)) {
                                map->mtrr = mtrr_add(map->offset, map->size,
                                                     MTRR_TYPE_WRCOMB, 1);
                        }
                }
                if (map->type == _DRM_REGISTERS)
                        map->handle = drm_ioremap(map->offset, map->size, dev);
                break;

        case _DRM_SHM:
                map->handle = vmalloc_32(map->size);
                DRM_DEBUG("%lu %d %p\n",
                          map->size, drm_order(map->size), map->handle);
                if (!map->handle) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if (map->flags & _DRM_CONTAINS_LOCK) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->lock.hw_lock != NULL) {
                                vfree(map->handle);
                                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                                return -EBUSY;
                        }
                        dev->sigdata.lock = dev->lock.hw_lock = map->handle;    /* Pointer to lock */
                }
                break;
        case _DRM_AGP:
                if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
                        map->offset += dev->hose->mem_space->start;
#endif
                        map->offset += dev->agp->base;
                        map->mtrr = dev->agp->agp_mtrr; /* for getmap */
                }
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += (unsigned long)dev->sg->virtual;
                break;
        case _DRM_CONSISTENT:
                /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
                 * As we're limiting the address to 2^32-1 (or less),
                 * casting it down to 32 bits is no problem, but we
                 * need to point to a 64bit variable first. */
                dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
                if (!dmah) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -ENOMEM;
                }
                map->handle = dmah->vaddr;
                map->offset = (unsigned long)dmah->busaddr;
                kfree(dmah);
                break;
        default:
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }

        list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
        if (!list) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        mutex_lock(&dev->struct_mutex);
        list_add(&list->head, &dev->maplist->head);

        /* Assign a 32-bit handle */
        /* We do it here so that dev->struct_mutex protects the increment */
        user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
            map->offset;
        ret = drm_map_handle(dev, &list->hash, user_token, 0);

        if (ret) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                drm_free(list, sizeof(*list), DRM_MEM_MAPS);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        list->user_token = list->hash.key;
        mutex_unlock(&dev->struct_mutex);

        *maplist = list;
        return 0;
}
int drm_addmap(drm_device_t * dev, unsigned int offset,
               unsigned int size, drm_map_type_t type,
               drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
        drm_map_list_t *list;
        int rc;

        rc = drm_addmap_core(dev, offset, size, type, flags, &list);
        if (!rc)
                *map_ptr = list->map;
        return rc;
}

EXPORT_SYMBOL(drm_addmap);
int drm_addmap_ioctl(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t map;
        drm_map_list_t *maplist;
        drm_map_t __user *argp = (void __user *)arg;
        int err;

        if (!(filp->f_mode & 3))
                return -EACCES; /* Require read/write */

        if (copy_from_user(&map, argp, sizeof(map))) {
                return -EFAULT;
        }

        if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
                return -EPERM;

        err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
                              &maplist);

        if (err)
                return err;

        if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
                return -EFAULT;

        /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
        if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
                return -EFAULT;
        return 0;
}
/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches the map on drm_device::maplist, removes it from the list, sees if
 * it's being used, and frees any associated resources (such as MTRR's) if
 * it's not.
 */
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
        drm_dma_handle_t dmah;

        /* Find the list entry for the map and remove it */
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);

                if (r_list->map == map) {
                        list_del(list);
                        drm_ht_remove_key(&dev->map_hash, r_list->user_token);
                        drm_free(list, sizeof(*list), DRM_MEM_MAPS);
                        break;
                }
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if (list == (&dev->maplist->head)) {
                return -EINVAL;
        }

        switch (map->type) {
        case _DRM_REGISTERS:
                drm_ioremapfree(map->handle, map->size, dev);
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                        int retcode;
                        retcode = mtrr_del(map->mtrr, map->offset, map->size);
                        DRM_DEBUG("mtrr_del=%d\n", retcode);
                }
                break;
        case _DRM_SHM:
                vfree(map->handle);
                break;
        case _DRM_AGP:
        case _DRM_SCATTER_GATHER:
                break;
        case _DRM_CONSISTENT:
                dmah.vaddr = map->handle;
                dmah.busaddr = map->offset;
                dmah.size = map->size;
                __drm_pci_free(dev, &dmah);
                break;
        }
        drm_free(map, sizeof(*map), DRM_MEM_MAPS);

        return 0;
}

EXPORT_SYMBOL(drm_rmmap_locked);

int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_rmmap_locked(dev, map);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

EXPORT_SYMBOL(drm_rmmap);
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t request;
        drm_local_map_t *map = NULL;
        struct list_head *list;
        int ret;

        if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
                return -EFAULT;
        }

        mutex_lock(&dev->struct_mutex);
        list_for_each(list, &dev->maplist->head) {
                drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

                if (r_list->map &&
                    r_list->user_token == (unsigned long)request.handle &&
                    r_list->map->flags & _DRM_REMOVABLE) {
                        map = r_list->map;
                        break;
                }
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if (list == (&dev->maplist->head)) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        if (!map) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        /* Register and framebuffer maps are permanent */
        if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }

        ret = drm_rmmap_locked(dev, map);

        mutex_unlock(&dev->struct_mutex);

        return ret;
}
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                drm_pci_free(dev, entry->seglist[i]);
                        }
                }
                drm_free(entry->seglist,
                         entry->seg_count *
                         sizeof(*entry->seglist), DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                drm_free(entry->buflist[i].dev_private,
                                         entry->buflist[i].dev_priv_size,
                                         DRM_MEM_BUFS);
                        }
                }
                drm_free(entry->buflist,
                         entry->buf_count *
                         sizeof(*entry->buflist), DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!dma)
                return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG("count: %d\n", count);
        DRM_DEBUG("order: %d\n", order);
        DRM_DEBUG("size: %d\n", size);
        DRM_DEBUG("agp_offset: %lx\n", agp_offset);
        DRM_DEBUG("alignment: %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total: %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec(&dev->buf_alloc);
        return 0;
}

EXPORT_SYMBOL(drm_addbufs_agp);
#endif                          /* __OS_HAS_AGP */
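/*
 * Illustrative driver-side request (hypothetical sizes): ask for 32
 * page-aligned 64KB buffers carved out of previously bound AGP space; on
 * return, request.count and request.size report what was actually allocated.
 *
 *      drm_buf_desc_t req = { .count = 32, .size = 65536,
 *                             .flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *                             .agp_start = 0 };
 *      int err = drm_addbufs_agp(dev, &req);
 */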
int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        drm_dma_handle_t *dmah;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                  request->count, request->size, size, order, dev->queue_count);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
                                   DRM_MEM_SEGS);
        if (!entry->seglist) {
                drm_free(entry->buflist,
                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->seglist, 0, count * sizeof(*entry->seglist));

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
                                  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
        if (!temp_pagelist) {
                drm_free(entry->buflist,
                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
                drm_free(entry->seglist,
                         count * sizeof(*entry->seglist), DRM_MEM_SEGS);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG("pagelist: %d entries\n",
                  dma->page_count + (count << page_order));

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while (entry->buf_count < count) {
                dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
                if (!dmah) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        drm_free(temp_pagelist,
                                 (dma->page_count + (count << page_order))
                                 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = dmah;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ 0x%08lx\n",
                                  dma->page_count + page_count,
                                  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
                        temp_pagelist[dma->page_count + page_count++]
                            = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
                }
                for (offset = 0;
                     offset + size <= total && entry->buf_count < count;
                     offset += alignment, ++entry->buf_count) {
                        buf = &entry->buflist[entry->buf_count];
                        buf->idx = dma->buf_count + entry->buf_count;
                        buf->total = alignment;
                        buf->order = order;
                        buf->used = 0;
                        buf->offset = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(dmah->vaddr + offset);
                        buf->bus_address = dmah->busaddr + offset;
                        buf->next = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head(&buf->dma_wait);
                        buf->filp = NULL;

                        buf->dev_priv_size = dev->driver->dev_priv_size;
                        buf->dev_private = drm_alloc(buf->dev_priv_size,
                                                     DRM_MEM_BUFS);
                        if (!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                drm_free(temp_pagelist,
                                         (dma->page_count +
                                          (count << page_order))
                                         * sizeof(*dma->pagelist),
                                         DRM_MEM_PAGES);
                                mutex_unlock(&dev->struct_mutex);
                                atomic_dec(&dev->buf_alloc);
                                return -ENOMEM;
                        }
                        memset(buf->dev_private, 0, buf->dev_priv_size);

                        DRM_DEBUG("buffer %d @ %p\n",
                                  entry->buf_count, buf->address);
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                drm_free(temp_pagelist,
                         (dma->page_count + (count << page_order))
                         * sizeof(*dma->pagelist), DRM_MEM_PAGES);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                drm_free(dma->pagelist,
                         dma->page_count * sizeof(*dma->pagelist),
                         DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        atomic_dec(&dev->buf_alloc);
        return 0;
}

EXPORT_SYMBOL(drm_addbufs_pci);
static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_SG))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count: %d\n", count);
        DRM_DEBUG("order: %d\n", order);
        DRM_DEBUG("size: %d\n", size);
        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
        DRM_DEBUG("alignment: %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total: %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset
                                        + (unsigned long)dev->sg->virtual);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }

                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
* dev
, drm_buf_desc_t
* request
)
1054 drm_device_dma_t
*dma
= dev
->dma
;
1055 drm_buf_entry_t
*entry
;
1057 unsigned long offset
;
1058 unsigned long agp_offset
;
1067 drm_buf_t
**temp_buflist
;
1069 if (!drm_core_check_feature(dev
, DRIVER_FB_DMA
))
1075 if (!capable(CAP_SYS_ADMIN
))
1078 count
= request
->count
;
1079 order
= drm_order(request
->size
);
1082 alignment
= (request
->flags
& _DRM_PAGE_ALIGN
)
1083 ? PAGE_ALIGN(size
) : size
;
1084 page_order
= order
- PAGE_SHIFT
> 0 ? order
- PAGE_SHIFT
: 0;
1085 total
= PAGE_SIZE
<< page_order
;
1088 agp_offset
= request
->agp_start
;
1090 DRM_DEBUG("count: %d\n", count
);
1091 DRM_DEBUG("order: %d\n", order
);
1092 DRM_DEBUG("size: %d\n", size
);
1093 DRM_DEBUG("agp_offset: %lu\n", agp_offset
);
1094 DRM_DEBUG("alignment: %d\n", alignment
);
1095 DRM_DEBUG("page_order: %d\n", page_order
);
1096 DRM_DEBUG("total: %d\n", total
);
1098 if (order
< DRM_MIN_ORDER
|| order
> DRM_MAX_ORDER
)
1100 if (dev
->queue_count
)
1101 return -EBUSY
; /* Not while in use */
1103 spin_lock(&dev
->count_lock
);
1105 spin_unlock(&dev
->count_lock
);
1108 atomic_inc(&dev
->buf_alloc
);
1109 spin_unlock(&dev
->count_lock
);
1111 mutex_lock(&dev
->struct_mutex
);
1112 entry
= &dma
->bufs
[order
];
1113 if (entry
->buf_count
) {
1114 mutex_unlock(&dev
->struct_mutex
);
1115 atomic_dec(&dev
->buf_alloc
);
1116 return -ENOMEM
; /* May only call once for each order */
1119 if (count
< 0 || count
> 4096) {
1120 mutex_unlock(&dev
->struct_mutex
);
1121 atomic_dec(&dev
->buf_alloc
);
1125 entry
->buflist
= drm_alloc(count
* sizeof(*entry
->buflist
),
1127 if (!entry
->buflist
) {
1128 mutex_unlock(&dev
->struct_mutex
);
1129 atomic_dec(&dev
->buf_alloc
);
1132 memset(entry
->buflist
, 0, count
* sizeof(*entry
->buflist
));
1134 entry
->buf_size
= size
;
1135 entry
->page_order
= page_order
;
1139 while (entry
->buf_count
< count
) {
1140 buf
= &entry
->buflist
[entry
->buf_count
];
1141 buf
->idx
= dma
->buf_count
+ entry
->buf_count
;
1142 buf
->total
= alignment
;
1146 buf
->offset
= (dma
->byte_count
+ offset
);
1147 buf
->bus_address
= agp_offset
+ offset
;
1148 buf
->address
= (void *)(agp_offset
+ offset
);
1152 init_waitqueue_head(&buf
->dma_wait
);
1155 buf
->dev_priv_size
= dev
->driver
->dev_priv_size
;
1156 buf
->dev_private
= drm_alloc(buf
->dev_priv_size
, DRM_MEM_BUFS
);
1157 if (!buf
->dev_private
) {
1158 /* Set count correctly so we free the proper amount. */
1159 entry
->buf_count
= count
;
1160 drm_cleanup_buf_error(dev
, entry
);
1161 mutex_unlock(&dev
->struct_mutex
);
1162 atomic_dec(&dev
->buf_alloc
);
1165 memset(buf
->dev_private
, 0, buf
->dev_priv_size
);
1167 DRM_DEBUG("buffer %d @ %p\n", entry
->buf_count
, buf
->address
);
1169 offset
+= alignment
;
1171 byte_count
+= PAGE_SIZE
<< page_order
;
1174 DRM_DEBUG("byte_count: %d\n", byte_count
);
1176 temp_buflist
= drm_realloc(dma
->buflist
,
1177 dma
->buf_count
* sizeof(*dma
->buflist
),
1178 (dma
->buf_count
+ entry
->buf_count
)
1179 * sizeof(*dma
->buflist
), DRM_MEM_BUFS
);
1180 if (!temp_buflist
) {
1181 /* Free the entry because it isn't valid */
1182 drm_cleanup_buf_error(dev
, entry
);
1183 mutex_unlock(&dev
->struct_mutex
);
1184 atomic_dec(&dev
->buf_alloc
);
1187 dma
->buflist
= temp_buflist
;
1189 for (i
= 0; i
< entry
->buf_count
; i
++) {
1190 dma
->buflist
[i
+ dma
->buf_count
] = &entry
->buflist
[i
];
1193 dma
->buf_count
+= entry
->buf_count
;
1194 dma
->seg_count
+= entry
->seg_count
;
1195 dma
->page_count
+= byte_count
>> PAGE_SHIFT
;
1196 dma
->byte_count
+= byte_count
;
1198 DRM_DEBUG("dma->buf_count : %d\n", dma
->buf_count
);
1199 DRM_DEBUG("entry->buf_count : %d\n", entry
->buf_count
);
1201 mutex_unlock(&dev
->struct_mutex
);
1203 request
->count
= entry
->buf_count
;
1204 request
->size
= size
;
1206 dma
->flags
= _DRM_DMA_USE_FB
;
1208 atomic_dec(&dev
->buf_alloc
);
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
        drm_buf_desc_t request;
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
                           sizeof(request)))
                return -EFAULT;

#if __OS_HAS_AGP
        if (request.flags & _DRM_AGP_BUFFER)
                ret = drm_addbufs_agp(dev, &request);
        else
#endif
        if (request.flags & _DRM_SG_BUFFER)
                ret = drm_addbufs_sg(dev, &request);
        else if (request.flags & _DRM_FB_BUFFER)
                ret = drm_addbufs_fb(dev, &request);
        else
                ret = drm_addbufs_pci(dev, &request);

        if (ret == 0) {
                if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
                        ret = -EFAULT;
                }
        }
        return ret;
}
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        drm_buf_info_t __user *argp = (void __user *)arg;
        int i;
        int count;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        spin_lock(&dev->count_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);

        if (copy_from_user(&request, argp, sizeof(request)))
                return -EFAULT;

        for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                if (dma->bufs[i].buf_count)
                        ++count;
        }

        DRM_DEBUG("count = %d\n", count);

        if (request.count >= count) {
                for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                        if (dma->bufs[i].buf_count) {
                                drm_buf_desc_t __user *to =
                                    &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if (copy_to_user(&to->count,
                                                 &from->buf_count,
                                                 sizeof(from->buf_count)) ||
                                    copy_to_user(&to->size,
                                                 &from->buf_size,
                                                 sizeof(from->buf_size)) ||
                                    copy_to_user(&to->low_mark,
                                                 &list->low_mark,
                                                 sizeof(list->low_mark)) ||
                                    copy_to_user(&to->high_mark,
                                                 &list->high_mark,
                                                 sizeof(list->high_mark)))
                                        return -EFAULT;

                                DRM_DEBUG("%d %d %d %d %d\n",
                                          i,
                                          dma->bufs[i].buf_count,
                                          dma->bufs[i].buf_size,
                                          dma->bufs[i].freelist.low_mark,
                                          dma->bufs[i].freelist.high_mark);
                                ++count;
                        }
                }
        }
        request.count = count;

        if (copy_to_user(argp, &request, sizeof(request)))
                return -EFAULT;

        return 0;
}
/**
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (copy_from_user(&request,
                           (drm_buf_desc_t __user *) arg, sizeof(request)))
                return -EFAULT;

        DRM_DEBUG("%d, %d, %d\n",
                  request.size, request.low_mark, request.high_mark);
        order = drm_order(request.size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        entry = &dma->bufs[order];

        if (request.low_mark < 0 || request.low_mark > entry->buf_count)
                return -EINVAL;
        if (request.high_mark < 0 || request.high_mark > entry->buf_count)
                return -EINVAL;

        entry->freelist.low_mark = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (copy_from_user(&request,
                           (drm_buf_free_t __user *) arg, sizeof(request)))
                return -EFAULT;

        DRM_DEBUG("%d\n", request.count);
        for (i = 0; i < request.count; i++) {
                if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
                        return -EFAULT;
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                                  idx, dma->buf_count - 1);
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if (buf->filp != filp) {
                        DRM_ERROR("Process %d freeing buffer not owned\n",
                                  current->pid);
                        return -EINVAL;
                }
                drm_free_buffer(dev, buf);
        }

        return 0;
}
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space.  The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
int drm_mapbufs(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_map_t __user *argp = (void __user *)arg;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        spin_lock(&dev->count_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);

        if (copy_from_user(&request, argp, sizeof(request)))
                return -EFAULT;

        if (request.count >= dma->buf_count) {
                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
                    || (drm_core_check_feature(dev, DRIVER_SG)
                        && (dma->flags & _DRM_DMA_USE_SG))
                    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
                        && (dma->flags & _DRM_DMA_USE_FB))) {
                        drm_map_t *map = dev->agp_buffer_map;
                        unsigned long token = dev->agp_buffer_token;

                        if (!map) {
                                retcode = -EINVAL;
                                goto done;
                        }

                        down_write(&current->mm->mmap_sem);
                        virtual = do_mmap(filp, 0, map->size,
                                          PROT_READ | PROT_WRITE,
                                          MAP_SHARED, token);
                        up_write(&current->mm->mmap_sem);
                } else {
                        down_write(&current->mm->mmap_sem);
                        virtual = do_mmap(filp, 0, dma->byte_count,
                                          PROT_READ | PROT_WRITE,
                                          MAP_SHARED, 0);
                        up_write(&current->mm->mmap_sem);
                }
                if (virtual > -1024UL) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void __user *)virtual;

                for (i = 0; i < dma->buf_count; i++) {
                        if (copy_to_user(&request.list[i].idx,
                                         &dma->buflist[i]->idx,
                                         sizeof(request.list[0].idx))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].total,
                                         &dma->buflist[i]->total,
                                         sizeof(request.list[0].total))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].used,
                                         &zero, sizeof(zero))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset;    /* *** */
                        if (copy_to_user(&request.list[i].address,
                                         &address, sizeof(address))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
      done:
        request.count = dma->buf_count;
        DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

        if (copy_to_user(argp, &request, sizeof(request)))
                return -EFAULT;

        return retcode;
}
/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

        if (size & (size - 1))
                ++order;

        return order;
}
EXPORT_SYMBOL(drm_order);