1 /**
2 * \file drm_bufs.c
3 * Generic buffer template
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9 /*
10 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
11 *
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36 #include <linux/vmalloc.h>
37 #include "drmP.h"
38
39 unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource)
40 {
41 return pci_resource_start(dev->pdev, resource);
42 }
43 EXPORT_SYMBOL(drm_get_resource_start);
44
45 unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource)
46 {
47 return pci_resource_len(dev->pdev, resource);
48 }
49
50 EXPORT_SYMBOL(drm_get_resource_len);
51
52 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
53 drm_local_map_t *map)
54 {
55 struct drm_map_list *entry;
56 list_for_each_entry(entry, &dev->maplist, head) {
57 if (entry->map && (entry->master == dev->primary->master) && (map->type == entry->map->type) &&
58 ((entry->map->offset == map->offset) ||
59 ((map->type == _DRM_SHM) && (map->flags&_DRM_CONTAINS_LOCK)))) {
60 return entry;
61 }
62 }
63
64 return NULL;
65 }
66
67 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
68 unsigned long user_token, int hashed_handle)
69 {
70 int use_hashed_handle;
71 #if (BITS_PER_LONG == 64)
72 use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
73 #elif (BITS_PER_LONG == 32)
74 use_hashed_handle = hashed_handle;
75 #else
76 #error Unsupported long size. Neither 64 nor 32 bits.
77 #endif
78
79 if (!use_hashed_handle) {
80 int ret;
81 hash->key = user_token >> PAGE_SHIFT;
82 ret = drm_ht_insert_item(&dev->map_hash, hash);
83 if (ret != -EINVAL)
84 return ret;
85 }
86 return drm_ht_just_insert_please(&dev->map_hash, hash,
87 user_token, 32 - PAGE_SHIFT - 3,
88 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
89 }
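/* Worked example (not in the original source), assuming 4 KiB pages
 * (PAGE_SHIFT == 12): a register map at physical offset 0xe0000000 has
 * user_token 0xe0000000, which fits in 32 bits, so the hash key is simply
 * 0xe0000000 >> PAGE_SHIFT == 0xe0000 and the token later handed back to
 * user space (hash.key << PAGE_SHIFT) is the same 0xe0000000. Only when the
 * token does not fit in 32 bits, or on a key collision, does
 * drm_ht_just_insert_please() pick a synthetic key in the
 * DRM_MAP_HASH_OFFSET range, so user space always receives a 32-bit,
 * page-aligned value to pass as the mmap() offset.
 */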
90
91 /**
92 * Ioctl to specify a range of memory that is available for mapping by a non-root process.
93 *
94 * \param inode device inode.
95 * \param file_priv DRM file private.
96 * \param cmd command.
97 * \param arg pointer to a drm_map structure.
98 * \return zero on success or a negative value on error.
99 *
100 * Adjusts the memory offset to its absolute value according to the mapping
101 * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where
102 * applicable and if supported by the kernel.
103 */
104 static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
105 unsigned int size, enum drm_map_type type,
106 enum drm_map_flags flags,
107 struct drm_map_list ** maplist)
108 {
109 struct drm_map *map;
110 struct drm_map_list *list;
111 drm_dma_handle_t *dmah;
112 unsigned long user_token;
113 int ret;
114
115 map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
116 if (!map)
117 return -ENOMEM;
118
119 map->offset = offset;
120 map->size = size;
121 map->flags = flags;
122 map->type = type;
123
124 /* Only allow shared memory to be removable since we only keep enough
125 * bookkeeping information about shared memory to allow for removal
126 * when processes fork.
127 */
128 if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
129 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
130 return -EINVAL;
131 }
132 DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
133 map->offset, map->size, map->type);
134 if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
135 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
136 return -EINVAL;
137 }
138 map->mtrr = -1;
139 map->handle = NULL;
140
141 switch (map->type) {
142 case _DRM_REGISTERS:
143 case _DRM_FRAME_BUFFER:
144 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
145 if (map->offset + (map->size-1) < map->offset ||
146 map->offset < virt_to_phys(high_memory)) {
147 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
148 return -EINVAL;
149 }
150 #endif
151 #ifdef __alpha__
152 map->offset += dev->hose->mem_space->start;
153 #endif
154 /* Some drivers preinitialize some maps, without the X Server
155 * needing to be aware of it. Therefore, we just return success
156 * when the server tries to create a duplicate map.
157 */
158 list = drm_find_matching_map(dev, map);
159 if (list != NULL) {
160 if (list->map->size != map->size) {
161 DRM_DEBUG("Matching maps of type %d with "
162 "mismatched sizes, (%ld vs %ld)\n",
163 map->type, map->size,
164 list->map->size);
165 list->map->size = map->size;
166 }
167
168 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
169 *maplist = list;
170 return 0;
171 }
172
173 if (drm_core_has_MTRR(dev)) {
174 if (map->type == _DRM_FRAME_BUFFER ||
175 (map->flags & _DRM_WRITE_COMBINING)) {
176 map->mtrr = mtrr_add(map->offset, map->size,
177 MTRR_TYPE_WRCOMB, 1);
178 }
179 }
180 if (map->type == _DRM_REGISTERS) {
181 map->handle = ioremap(map->offset, map->size);
182 if (!map->handle) {
183 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
184 return -ENOMEM;
185 }
186 }
187
188 break;
189 case _DRM_SHM:
190 list = drm_find_matching_map(dev, map);
191 if (list != NULL) {
192 if(list->map->size != map->size) {
193 DRM_DEBUG("Matching maps of type %d with "
194 "mismatched sizes, (%ld vs %ld)\n",
195 map->type, map->size, list->map->size);
196 list->map->size = map->size;
197 }
198
199 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
200 *maplist = list;
201 return 0;
202 }
203 map->handle = vmalloc_user(map->size);
204 DRM_DEBUG("%lu %d %p\n",
205 map->size, drm_order(map->size), map->handle);
206 if (!map->handle) {
207 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
208 return -ENOMEM;
209 }
210 map->offset = (unsigned long)map->handle;
211 if (map->flags & _DRM_CONTAINS_LOCK) {
212 /* Prevent a 2nd X Server from creating a 2nd lock */
213 if (dev->primary->master->lock.hw_lock != NULL) {
214 vfree(map->handle);
215 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
216 return -EBUSY;
217 }
218 dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
219 }
220 break;
221 case _DRM_AGP: {
222 struct drm_agp_mem *entry;
223 int valid = 0;
224
225 if (!drm_core_has_AGP(dev)) {
226 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
227 return -EINVAL;
228 }
229 #ifdef __alpha__
230 map->offset += dev->hose->mem_space->start;
231 #endif
232 /* In some cases (i810 driver), user space may have already
233 * added the AGP base itself, because dev->agp->base previously
234 * only got set during AGP enable. So, only add the base
235 * address if the map's offset isn't already within the
236 * aperture.
237 */
238 if (map->offset < dev->agp->base ||
239 map->offset > dev->agp->base +
240 dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
241 map->offset += dev->agp->base;
242 }
243 map->mtrr = dev->agp->agp_mtrr; /* for getmap */
244
245 /* This assumes the DRM is in total control of AGP space.
246 * That's not always the case, since AGP can be under user-space
247 * control (e.g. the i810 driver), in which case the loop below is
248 * skipped. So before returning -EPERM we also check that
249 * dev->agp->memory is actually non-empty.
250 */
251 list_for_each_entry(entry, &dev->agp->memory, head) {
252 if ((map->offset >= entry->bound) &&
253 (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
254 valid = 1;
255 break;
256 }
257 }
258 if (!list_empty(&dev->agp->memory) && !valid) {
259 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
260 return -EPERM;
261 }
262 DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
263
264 break;
265 }
266 case _DRM_GEM:
267 DRM_ERROR("tried to addmap GEM object\n");
268 break;
269 case _DRM_SCATTER_GATHER:
270 if (!dev->sg) {
271 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
272 return -EINVAL;
273 }
274 map->offset += (unsigned long)dev->sg->virtual;
275 break;
276 case _DRM_CONSISTENT:
277 /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
278 * As we're limiting the address to 2^32-1 (or less),
279 * casting it down to 32 bits is no problem, but we
280 * need to point to a 64bit variable first. */
281 dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
282 if (!dmah) {
283 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
284 return -ENOMEM;
285 }
286 map->handle = dmah->vaddr;
287 map->offset = (unsigned long)dmah->busaddr;
288 kfree(dmah);
289 break;
290 default:
291 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
292 return -EINVAL;
293 }
294
295 list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
296 if (!list) {
297 if (map->type == _DRM_REGISTERS)
298 iounmap(map->handle);
299 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
300 return -EINVAL;
301 }
302 memset(list, 0, sizeof(*list));
303 list->map = map;
304
305 mutex_lock(&dev->struct_mutex);
306 list_add(&list->head, &dev->maplist);
307
308 /* Assign a 32-bit handle */
309 /* We do it here so that dev->struct_mutex protects the increment */
310 user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
311 map->offset;
312 ret = drm_map_handle(dev, &list->hash, user_token, 0);
313 if (ret) {
314 if (map->type == _DRM_REGISTERS)
315 iounmap(map->handle);
316 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
317 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
318 mutex_unlock(&dev->struct_mutex);
319 return ret;
320 }
321
322 list->user_token = list->hash.key << PAGE_SHIFT;
323 mutex_unlock(&dev->struct_mutex);
324
325 list->master = dev->primary->master;
326 *maplist = list;
327 return 0;
328 }
329
330 int drm_addmap(struct drm_device * dev, unsigned int offset,
331 unsigned int size, enum drm_map_type type,
332 enum drm_map_flags flags, drm_local_map_t ** map_ptr)
333 {
334 struct drm_map_list *list;
335 int rc;
336
337 rc = drm_addmap_core(dev, offset, size, type, flags, &list);
338 if (!rc)
339 *map_ptr = list->map;
340 return rc;
341 }
342
343 EXPORT_SYMBOL(drm_addmap);
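/* Sketch of a typical in-kernel caller (hypothetical driver code, not part
 * of this file): a driver's load hook might map its register BAR with the
 * helpers above, assuming BAR 0 carries the MMIO registers and
 * dev_priv->mmio is a driver-private field:
 *
 *	drm_local_map_t *mmio;
 *	int ret;
 *
 *	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *			 drm_get_resource_len(dev, 0),
 *			 _DRM_REGISTERS, _DRM_READ_ONLY, &mmio);
 *	if (ret)
 *		return ret;
 *	dev_priv->mmio = mmio;
 */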
344
345 int drm_addmap_ioctl(struct drm_device *dev, void *data,
346 struct drm_file *file_priv)
347 {
348 struct drm_map *map = data;
349 struct drm_map_list *maplist;
350 int err;
351
352 if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
353 return -EPERM;
354
355 err = drm_addmap_core(dev, map->offset, map->size, map->type,
356 map->flags, &maplist);
357
358 if (err)
359 return err;
360
361 /* avoid a warning on 64-bit; this cast isn't pretty, but the ioctl ABI is already fixed, so it's too late to change */
362 map->handle = (void *)(unsigned long)maplist->user_token;
363 return 0;
364 }
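/* User-space view (sketch, not part of this file): the X server creates the
 * SAREA/lock map through libdrm, which reduces to roughly the following;
 * fd and sarea_token are hypothetical user-space variables, SAREA_MAX comes
 * from drm_sarea.h:
 *
 *	struct drm_map map = {
 *		.offset = 0,
 *		.size   = SAREA_MAX,
 *		.type   = _DRM_SHM,
 *		.flags  = _DRM_CONTAINS_LOCK,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) == 0)
 *		sarea_token = (unsigned long)map.handle;  // offset for mmap()
 */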
365
366 /**
367 * Remove a map from the map list and deallocate its resources if the
368 * mapping isn't in use.
369 *
370 * \param inode device inode.
371 * \param file_priv DRM file private.
372 * \param cmd command.
373 * \param arg pointer to a struct drm_map structure.
374 * \return zero on success or a negative value on error.
375 *
376 * Searches for the map on drm_device::maplist, removes it from the list,
377 * checks whether it is still being used, and frees any associated resources
378 * (such as MTRRs) if it is not.
379 *
380 * \sa drm_addmap
381 */
382 int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
383 {
384 struct drm_map_list *r_list = NULL, *list_t;
385 drm_dma_handle_t dmah;
386 int found = 0;
387 struct drm_master *master;
388
389 /* Find the list entry for the map and remove it */
390 list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
391 if (r_list->map == map) {
392 master = r_list->master;
393 list_del(&r_list->head);
394 drm_ht_remove_key(&dev->map_hash,
395 r_list->user_token >> PAGE_SHIFT);
396 drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
397 found = 1;
398 break;
399 }
400 }
401
402 if (!found)
403 return -EINVAL;
404
405 switch (map->type) {
406 case _DRM_REGISTERS:
407 iounmap(map->handle);
408 /* FALLTHROUGH */
409 case _DRM_FRAME_BUFFER:
410 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
411 int retcode;
412 retcode = mtrr_del(map->mtrr, map->offset, map->size);
413 DRM_DEBUG("mtrr_del=%d\n", retcode);
414 }
415 break;
416 case _DRM_SHM:
417 vfree(map->handle);
418 if (master) {
419 if (dev->sigdata.lock == master->lock.hw_lock)
420 dev->sigdata.lock = NULL;
421 master->lock.hw_lock = NULL; /* SHM removed */
422 master->lock.file_priv = NULL;
423 wake_up_interruptible(&master->lock.lock_queue);
424 }
425 break;
426 case _DRM_AGP:
427 case _DRM_SCATTER_GATHER:
428 break;
429 case _DRM_CONSISTENT:
430 dmah.vaddr = map->handle;
431 dmah.busaddr = map->offset;
432 dmah.size = map->size;
433 __drm_pci_free(dev, &dmah);
434 break;
435 case _DRM_GEM:
436 DRM_ERROR("tried to rmmap GEM object\n");
437 break;
438 }
439 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
440
441 return 0;
442 }
443
444 int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
445 {
446 int ret;
447
448 mutex_lock(&dev->struct_mutex);
449 ret = drm_rmmap_locked(dev, map);
450 mutex_unlock(&dev->struct_mutex);
451
452 return ret;
453 }
454 EXPORT_SYMBOL(drm_rmmap);
455
456 /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
457 * the last close of the device, and this is necessary for cleanup when things
458 * exit uncleanly. Therefore, having userland manually remove mappings seems
459 * like a pointless exercise since they're going away anyway.
460 *
461 * One use case might be after addmap is allowed for normal users for SHM and
462 * gets used by drivers that the server doesn't need to care about. This seems
463 * unlikely.
464 */
465 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
466 struct drm_file *file_priv)
467 {
468 struct drm_map *request = data;
469 drm_local_map_t *map = NULL;
470 struct drm_map_list *r_list;
471 int ret;
472
473 mutex_lock(&dev->struct_mutex);
474 list_for_each_entry(r_list, &dev->maplist, head) {
475 if (r_list->map &&
476 r_list->user_token == (unsigned long)request->handle &&
477 r_list->map->flags & _DRM_REMOVABLE) {
478 map = r_list->map;
479 break;
480 }
481 }
482
483 /* List has wrapped around to the head pointer, or it's empty and we
484 * didn't find anything.
485 */
486 if (list_empty(&dev->maplist) || !map) {
487 mutex_unlock(&dev->struct_mutex);
488 return -EINVAL;
489 }
490
491 /* Register and framebuffer maps are permanent */
492 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
493 mutex_unlock(&dev->struct_mutex);
494 return 0;
495 }
496
497 ret = drm_rmmap_locked(dev, map);
498
499 mutex_unlock(&dev->struct_mutex);
500
501 return ret;
502 }
503
504 /**
505 * Cleanup after an error on one of the addbufs() functions.
506 *
507 * \param dev DRM device.
508 * \param entry buffer entry where the error occurred.
509 *
510 * Frees any pages and buffers associated with the given entry.
511 */
512 static void drm_cleanup_buf_error(struct drm_device * dev,
513 struct drm_buf_entry * entry)
514 {
515 int i;
516
517 if (entry->seg_count) {
518 for (i = 0; i < entry->seg_count; i++) {
519 if (entry->seglist[i]) {
520 drm_pci_free(dev, entry->seglist[i]);
521 }
522 }
523 drm_free(entry->seglist,
524 entry->seg_count *
525 sizeof(*entry->seglist), DRM_MEM_SEGS);
526
527 entry->seg_count = 0;
528 }
529
530 if (entry->buf_count) {
531 for (i = 0; i < entry->buf_count; i++) {
532 if (entry->buflist[i].dev_private) {
533 drm_free(entry->buflist[i].dev_private,
534 entry->buflist[i].dev_priv_size,
535 DRM_MEM_BUFS);
536 }
537 }
538 drm_free(entry->buflist,
539 entry->buf_count *
540 sizeof(*entry->buflist), DRM_MEM_BUFS);
541
542 entry->buf_count = 0;
543 }
544 }
545
546 #if __OS_HAS_AGP
547 /**
548 * Add AGP buffers for DMA transfers.
549 *
550 * \param dev struct drm_device to which the buffers are to be added.
551 * \param request pointer to a struct drm_buf_desc describing the request.
552 * \return zero on success or a negative number on failure.
553 *
554 * After some sanity checks, creates a drm_buf structure for each buffer and
555 * reallocates the device-wide buffer list to accommodate the new
556 * buffers.
557 */
558 int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
559 {
560 struct drm_device_dma *dma = dev->dma;
561 struct drm_buf_entry *entry;
562 struct drm_agp_mem *agp_entry;
563 struct drm_buf *buf;
564 unsigned long offset;
565 unsigned long agp_offset;
566 int count;
567 int order;
568 int size;
569 int alignment;
570 int page_order;
571 int total;
572 int byte_count;
573 int i, valid;
574 struct drm_buf **temp_buflist;
575
576 if (!dma)
577 return -EINVAL;
578
579 count = request->count;
580 order = drm_order(request->size);
581 size = 1 << order;
582
583 alignment = (request->flags & _DRM_PAGE_ALIGN)
584 ? PAGE_ALIGN(size) : size;
585 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
586 total = PAGE_SIZE << page_order;
587
588 byte_count = 0;
589 agp_offset = dev->agp->base + request->agp_start;
590
591 DRM_DEBUG("count: %d\n", count);
592 DRM_DEBUG("order: %d\n", order);
593 DRM_DEBUG("size: %d\n", size);
594 DRM_DEBUG("agp_offset: %lx\n", agp_offset);
595 DRM_DEBUG("alignment: %d\n", alignment);
596 DRM_DEBUG("page_order: %d\n", page_order);
597 DRM_DEBUG("total: %d\n", total);
598
599 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
600 return -EINVAL;
601 if (dev->queue_count)
602 return -EBUSY; /* Not while in use */
603
604 /* Make sure buffers are located in AGP memory that we own */
605 valid = 0;
606 list_for_each_entry(agp_entry, &dev->agp->memory, head) {
607 if ((agp_offset >= agp_entry->bound) &&
608 (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
609 valid = 1;
610 break;
611 }
612 }
613 if (!list_empty(&dev->agp->memory) && !valid) {
614 DRM_DEBUG("zone invalid\n");
615 return -EINVAL;
616 }
617 spin_lock(&dev->count_lock);
618 if (dev->buf_use) {
619 spin_unlock(&dev->count_lock);
620 return -EBUSY;
621 }
622 atomic_inc(&dev->buf_alloc);
623 spin_unlock(&dev->count_lock);
624
625 mutex_lock(&dev->struct_mutex);
626 entry = &dma->bufs[order];
627 if (entry->buf_count) {
628 mutex_unlock(&dev->struct_mutex);
629 atomic_dec(&dev->buf_alloc);
630 return -ENOMEM; /* May only call once for each order */
631 }
632
633 if (count < 0 || count > 4096) {
634 mutex_unlock(&dev->struct_mutex);
635 atomic_dec(&dev->buf_alloc);
636 return -EINVAL;
637 }
638
639 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
640 DRM_MEM_BUFS);
641 if (!entry->buflist) {
642 mutex_unlock(&dev->struct_mutex);
643 atomic_dec(&dev->buf_alloc);
644 return -ENOMEM;
645 }
646 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
647
648 entry->buf_size = size;
649 entry->page_order = page_order;
650
651 offset = 0;
652
653 while (entry->buf_count < count) {
654 buf = &entry->buflist[entry->buf_count];
655 buf->idx = dma->buf_count + entry->buf_count;
656 buf->total = alignment;
657 buf->order = order;
658 buf->used = 0;
659
660 buf->offset = (dma->byte_count + offset);
661 buf->bus_address = agp_offset + offset;
662 buf->address = (void *)(agp_offset + offset);
663 buf->next = NULL;
664 buf->waiting = 0;
665 buf->pending = 0;
666 init_waitqueue_head(&buf->dma_wait);
667 buf->file_priv = NULL;
668
669 buf->dev_priv_size = dev->driver->dev_priv_size;
670 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
671 if (!buf->dev_private) {
672 /* Set count correctly so we free the proper amount. */
673 entry->buf_count = count;
674 drm_cleanup_buf_error(dev, entry);
675 mutex_unlock(&dev->struct_mutex);
676 atomic_dec(&dev->buf_alloc);
677 return -ENOMEM;
678 }
679 memset(buf->dev_private, 0, buf->dev_priv_size);
680
681 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
682
683 offset += alignment;
684 entry->buf_count++;
685 byte_count += PAGE_SIZE << page_order;
686 }
687
688 DRM_DEBUG("byte_count: %d\n", byte_count);
689
690 temp_buflist = drm_realloc(dma->buflist,
691 dma->buf_count * sizeof(*dma->buflist),
692 (dma->buf_count + entry->buf_count)
693 * sizeof(*dma->buflist), DRM_MEM_BUFS);
694 if (!temp_buflist) {
695 /* Free the entry because it isn't valid */
696 drm_cleanup_buf_error(dev, entry);
697 mutex_unlock(&dev->struct_mutex);
698 atomic_dec(&dev->buf_alloc);
699 return -ENOMEM;
700 }
701 dma->buflist = temp_buflist;
702
703 for (i = 0; i < entry->buf_count; i++) {
704 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
705 }
706
707 dma->buf_count += entry->buf_count;
708 dma->seg_count += entry->seg_count;
709 dma->page_count += byte_count >> PAGE_SHIFT;
710 dma->byte_count += byte_count;
711
712 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
713 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
714
715 mutex_unlock(&dev->struct_mutex);
716
717 request->count = entry->buf_count;
718 request->size = size;
719
720 dma->flags = _DRM_DMA_USE_AGP;
721
722 atomic_dec(&dev->buf_alloc);
723 return 0;
724 }
725 EXPORT_SYMBOL(drm_addbufs_agp);
726 #endif /* __OS_HAS_AGP */
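/* Worked example of the sizing math shared by the drm_addbufs_*() variants,
 * assuming 4 KiB pages (PAGE_SHIFT == 12) and a request with count = 32,
 * size = 65536, flags = _DRM_PAGE_ALIGN:
 *
 *	order      = drm_order(65536)        = 16
 *	size       = 1 << order              = 65536
 *	alignment  = PAGE_ALIGN(size)        = 65536
 *	page_order = order - PAGE_SHIFT      = 4
 *	total      = PAGE_SIZE << page_order = 65536
 *
 * so each of the 32 buffers is a 64 KiB, page-aligned slot and, in the AGP
 * path above, byte_count advances by PAGE_SIZE << page_order per buffer.
 */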
727
728 int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
729 {
730 struct drm_device_dma *dma = dev->dma;
731 int count;
732 int order;
733 int size;
734 int total;
735 int page_order;
736 struct drm_buf_entry *entry;
737 drm_dma_handle_t *dmah;
738 struct drm_buf *buf;
739 int alignment;
740 unsigned long offset;
741 int i;
742 int byte_count;
743 int page_count;
744 unsigned long *temp_pagelist;
745 struct drm_buf **temp_buflist;
746
747 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
748 return -EINVAL;
749
750 if (!dma)
751 return -EINVAL;
752
753 if (!capable(CAP_SYS_ADMIN))
754 return -EPERM;
755
756 count = request->count;
757 order = drm_order(request->size);
758 size = 1 << order;
759
760 DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
761 request->count, request->size, size, order, dev->queue_count);
762
763 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
764 return -EINVAL;
765 if (dev->queue_count)
766 return -EBUSY; /* Not while in use */
767
768 alignment = (request->flags & _DRM_PAGE_ALIGN)
769 ? PAGE_ALIGN(size) : size;
770 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
771 total = PAGE_SIZE << page_order;
772
773 spin_lock(&dev->count_lock);
774 if (dev->buf_use) {
775 spin_unlock(&dev->count_lock);
776 return -EBUSY;
777 }
778 atomic_inc(&dev->buf_alloc);
779 spin_unlock(&dev->count_lock);
780
781 mutex_lock(&dev->struct_mutex);
782 entry = &dma->bufs[order];
783 if (entry->buf_count) {
784 mutex_unlock(&dev->struct_mutex);
785 atomic_dec(&dev->buf_alloc);
786 return -ENOMEM; /* May only call once for each order */
787 }
788
789 if (count < 0 || count > 4096) {
790 mutex_unlock(&dev->struct_mutex);
791 atomic_dec(&dev->buf_alloc);
792 return -EINVAL;
793 }
794
795 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
796 DRM_MEM_BUFS);
797 if (!entry->buflist) {
798 mutex_unlock(&dev->struct_mutex);
799 atomic_dec(&dev->buf_alloc);
800 return -ENOMEM;
801 }
802 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
803
804 entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
805 DRM_MEM_SEGS);
806 if (!entry->seglist) {
807 drm_free(entry->buflist,
808 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
809 mutex_unlock(&dev->struct_mutex);
810 atomic_dec(&dev->buf_alloc);
811 return -ENOMEM;
812 }
813 memset(entry->seglist, 0, count * sizeof(*entry->seglist));
814
815 /* Keep the original pagelist until we know all the allocations
816 * have succeeded
817 */
818 temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
819 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
820 if (!temp_pagelist) {
821 drm_free(entry->buflist,
822 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
823 drm_free(entry->seglist,
824 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
825 mutex_unlock(&dev->struct_mutex);
826 atomic_dec(&dev->buf_alloc);
827 return -ENOMEM;
828 }
829 memcpy(temp_pagelist,
830 dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
831 DRM_DEBUG("pagelist: %d entries\n",
832 dma->page_count + (count << page_order));
833
834 entry->buf_size = size;
835 entry->page_order = page_order;
836 byte_count = 0;
837 page_count = 0;
838
839 while (entry->buf_count < count) {
840
841 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
842
843 if (!dmah) {
844 /* Set count correctly so we free the proper amount. */
845 entry->buf_count = count;
846 entry->seg_count = count;
847 drm_cleanup_buf_error(dev, entry);
848 drm_free(temp_pagelist,
849 (dma->page_count + (count << page_order))
850 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
851 mutex_unlock(&dev->struct_mutex);
852 atomic_dec(&dev->buf_alloc);
853 return -ENOMEM;
854 }
855 entry->seglist[entry->seg_count++] = dmah;
856 for (i = 0; i < (1 << page_order); i++) {
857 DRM_DEBUG("page %d @ 0x%08lx\n",
858 dma->page_count + page_count,
859 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
860 temp_pagelist[dma->page_count + page_count++]
861 = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
862 }
863 for (offset = 0;
864 offset + size <= total && entry->buf_count < count;
865 offset += alignment, ++entry->buf_count) {
866 buf = &entry->buflist[entry->buf_count];
867 buf->idx = dma->buf_count + entry->buf_count;
868 buf->total = alignment;
869 buf->order = order;
870 buf->used = 0;
871 buf->offset = (dma->byte_count + byte_count + offset);
872 buf->address = (void *)(dmah->vaddr + offset);
873 buf->bus_address = dmah->busaddr + offset;
874 buf->next = NULL;
875 buf->waiting = 0;
876 buf->pending = 0;
877 init_waitqueue_head(&buf->dma_wait);
878 buf->file_priv = NULL;
879
880 buf->dev_priv_size = dev->driver->dev_priv_size;
881 buf->dev_private = drm_alloc(buf->dev_priv_size,
882 DRM_MEM_BUFS);
883 if (!buf->dev_private) {
884 /* Set count correctly so we free the proper amount. */
885 entry->buf_count = count;
886 entry->seg_count = count;
887 drm_cleanup_buf_error(dev, entry);
888 drm_free(temp_pagelist,
889 (dma->page_count +
890 (count << page_order))
891 * sizeof(*dma->pagelist),
892 DRM_MEM_PAGES);
893 mutex_unlock(&dev->struct_mutex);
894 atomic_dec(&dev->buf_alloc);
895 return -ENOMEM;
896 }
897 memset(buf->dev_private, 0, buf->dev_priv_size);
898
899 DRM_DEBUG("buffer %d @ %p\n",
900 entry->buf_count, buf->address);
901 }
902 byte_count += PAGE_SIZE << page_order;
903 }
904
905 temp_buflist = drm_realloc(dma->buflist,
906 dma->buf_count * sizeof(*dma->buflist),
907 (dma->buf_count + entry->buf_count)
908 * sizeof(*dma->buflist), DRM_MEM_BUFS);
909 if (!temp_buflist) {
910 /* Free the entry because it isn't valid */
911 drm_cleanup_buf_error(dev, entry);
912 drm_free(temp_pagelist,
913 (dma->page_count + (count << page_order))
914 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
915 mutex_unlock(&dev->struct_mutex);
916 atomic_dec(&dev->buf_alloc);
917 return -ENOMEM;
918 }
919 dma->buflist = temp_buflist;
920
921 for (i = 0; i < entry->buf_count; i++) {
922 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
923 }
924
925 /* No allocations failed, so now we can replace the original pagelist
926 * with the new one.
927 */
928 if (dma->page_count) {
929 drm_free(dma->pagelist,
930 dma->page_count * sizeof(*dma->pagelist),
931 DRM_MEM_PAGES);
932 }
933 dma->pagelist = temp_pagelist;
934
935 dma->buf_count += entry->buf_count;
936 dma->seg_count += entry->seg_count;
937 dma->page_count += entry->seg_count << page_order;
938 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
939
940 mutex_unlock(&dev->struct_mutex);
941
942 request->count = entry->buf_count;
943 request->size = size;
944
945 if (request->flags & _DRM_PCI_BUFFER_RO)
946 dma->flags = _DRM_DMA_USE_PCI_RO;
947
948 atomic_dec(&dev->buf_alloc);
949 return 0;
950
951 }
952 EXPORT_SYMBOL(drm_addbufs_pci);
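/* Sketch (hypothetical driver code, not part of this file): a PCI-DMA driver
 * could preallocate its DMA buffers through the same path the ioctl uses,
 * e.g.:
 *
 *	struct drm_buf_desc req = {
 *		.count = 64,
 *		.size  = 16384,
 *		.flags = _DRM_PAGE_ALIGN,
 *	};
 *	ret = drm_addbufs_pci(dev, &req);
 *	// on success, req.count/req.size report what was actually allocated
 */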
953
954 static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
955 {
956 struct drm_device_dma *dma = dev->dma;
957 struct drm_buf_entry *entry;
958 struct drm_buf *buf;
959 unsigned long offset;
960 unsigned long agp_offset;
961 int count;
962 int order;
963 int size;
964 int alignment;
965 int page_order;
966 int total;
967 int byte_count;
968 int i;
969 struct drm_buf **temp_buflist;
970
971 if (!drm_core_check_feature(dev, DRIVER_SG))
972 return -EINVAL;
973
974 if (!dma)
975 return -EINVAL;
976
977 if (!capable(CAP_SYS_ADMIN))
978 return -EPERM;
979
980 count = request->count;
981 order = drm_order(request->size);
982 size = 1 << order;
983
984 alignment = (request->flags & _DRM_PAGE_ALIGN)
985 ? PAGE_ALIGN(size) : size;
986 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
987 total = PAGE_SIZE << page_order;
988
989 byte_count = 0;
990 agp_offset = request->agp_start;
991
992 DRM_DEBUG("count: %d\n", count);
993 DRM_DEBUG("order: %d\n", order);
994 DRM_DEBUG("size: %d\n", size);
995 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
996 DRM_DEBUG("alignment: %d\n", alignment);
997 DRM_DEBUG("page_order: %d\n", page_order);
998 DRM_DEBUG("total: %d\n", total);
999
1000 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1001 return -EINVAL;
1002 if (dev->queue_count)
1003 return -EBUSY; /* Not while in use */
1004
1005 spin_lock(&dev->count_lock);
1006 if (dev->buf_use) {
1007 spin_unlock(&dev->count_lock);
1008 return -EBUSY;
1009 }
1010 atomic_inc(&dev->buf_alloc);
1011 spin_unlock(&dev->count_lock);
1012
1013 mutex_lock(&dev->struct_mutex);
1014 entry = &dma->bufs[order];
1015 if (entry->buf_count) {
1016 mutex_unlock(&dev->struct_mutex);
1017 atomic_dec(&dev->buf_alloc);
1018 return -ENOMEM; /* May only call once for each order */
1019 }
1020
1021 if (count < 0 || count > 4096) {
1022 mutex_unlock(&dev->struct_mutex);
1023 atomic_dec(&dev->buf_alloc);
1024 return -EINVAL;
1025 }
1026
1027 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
1028 DRM_MEM_BUFS);
1029 if (!entry->buflist) {
1030 mutex_unlock(&dev->struct_mutex);
1031 atomic_dec(&dev->buf_alloc);
1032 return -ENOMEM;
1033 }
1034 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
1035
1036 entry->buf_size = size;
1037 entry->page_order = page_order;
1038
1039 offset = 0;
1040
1041 while (entry->buf_count < count) {
1042 buf = &entry->buflist[entry->buf_count];
1043 buf->idx = dma->buf_count + entry->buf_count;
1044 buf->total = alignment;
1045 buf->order = order;
1046 buf->used = 0;
1047
1048 buf->offset = (dma->byte_count + offset);
1049 buf->bus_address = agp_offset + offset;
1050 buf->address = (void *)(agp_offset + offset
1051 + (unsigned long)dev->sg->virtual);
1052 buf->next = NULL;
1053 buf->waiting = 0;
1054 buf->pending = 0;
1055 init_waitqueue_head(&buf->dma_wait);
1056 buf->file_priv = NULL;
1057
1058 buf->dev_priv_size = dev->driver->dev_priv_size;
1059 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
1060 if (!buf->dev_private) {
1061 /* Set count correctly so we free the proper amount. */
1062 entry->buf_count = count;
1063 drm_cleanup_buf_error(dev, entry);
1064 mutex_unlock(&dev->struct_mutex);
1065 atomic_dec(&dev->buf_alloc);
1066 return -ENOMEM;
1067 }
1068
1069 memset(buf->dev_private, 0, buf->dev_priv_size);
1070
1071 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1072
1073 offset += alignment;
1074 entry->buf_count++;
1075 byte_count += PAGE_SIZE << page_order;
1076 }
1077
1078 DRM_DEBUG("byte_count: %d\n", byte_count);
1079
1080 temp_buflist = drm_realloc(dma->buflist,
1081 dma->buf_count * sizeof(*dma->buflist),
1082 (dma->buf_count + entry->buf_count)
1083 * sizeof(*dma->buflist), DRM_MEM_BUFS);
1084 if (!temp_buflist) {
1085 /* Free the entry because it isn't valid */
1086 drm_cleanup_buf_error(dev, entry);
1087 mutex_unlock(&dev->struct_mutex);
1088 atomic_dec(&dev->buf_alloc);
1089 return -ENOMEM;
1090 }
1091 dma->buflist = temp_buflist;
1092
1093 for (i = 0; i < entry->buf_count; i++) {
1094 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1095 }
1096
1097 dma->buf_count += entry->buf_count;
1098 dma->seg_count += entry->seg_count;
1099 dma->page_count += byte_count >> PAGE_SHIFT;
1100 dma->byte_count += byte_count;
1101
1102 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1103 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1104
1105 mutex_unlock(&dev->struct_mutex);
1106
1107 request->count = entry->buf_count;
1108 request->size = size;
1109
1110 dma->flags = _DRM_DMA_USE_SG;
1111
1112 atomic_dec(&dev->buf_alloc);
1113 return 0;
1114 }
1115
1116 static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
1117 {
1118 struct drm_device_dma *dma = dev->dma;
1119 struct drm_buf_entry *entry;
1120 struct drm_buf *buf;
1121 unsigned long offset;
1122 unsigned long agp_offset;
1123 int count;
1124 int order;
1125 int size;
1126 int alignment;
1127 int page_order;
1128 int total;
1129 int byte_count;
1130 int i;
1131 struct drm_buf **temp_buflist;
1132
1133 if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1134 return -EINVAL;
1135
1136 if (!dma)
1137 return -EINVAL;
1138
1139 if (!capable(CAP_SYS_ADMIN))
1140 return -EPERM;
1141
1142 count = request->count;
1143 order = drm_order(request->size);
1144 size = 1 << order;
1145
1146 alignment = (request->flags & _DRM_PAGE_ALIGN)
1147 ? PAGE_ALIGN(size) : size;
1148 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1149 total = PAGE_SIZE << page_order;
1150
1151 byte_count = 0;
1152 agp_offset = request->agp_start;
1153
1154 DRM_DEBUG("count: %d\n", count);
1155 DRM_DEBUG("order: %d\n", order);
1156 DRM_DEBUG("size: %d\n", size);
1157 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1158 DRM_DEBUG("alignment: %d\n", alignment);
1159 DRM_DEBUG("page_order: %d\n", page_order);
1160 DRM_DEBUG("total: %d\n", total);
1161
1162 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1163 return -EINVAL;
1164 if (dev->queue_count)
1165 return -EBUSY; /* Not while in use */
1166
1167 spin_lock(&dev->count_lock);
1168 if (dev->buf_use) {
1169 spin_unlock(&dev->count_lock);
1170 return -EBUSY;
1171 }
1172 atomic_inc(&dev->buf_alloc);
1173 spin_unlock(&dev->count_lock);
1174
1175 mutex_lock(&dev->struct_mutex);
1176 entry = &dma->bufs[order];
1177 if (entry->buf_count) {
1178 mutex_unlock(&dev->struct_mutex);
1179 atomic_dec(&dev->buf_alloc);
1180 return -ENOMEM; /* May only call once for each order */
1181 }
1182
1183 if (count < 0 || count > 4096) {
1184 mutex_unlock(&dev->struct_mutex);
1185 atomic_dec(&dev->buf_alloc);
1186 return -EINVAL;
1187 }
1188
1189 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
1190 DRM_MEM_BUFS);
1191 if (!entry->buflist) {
1192 mutex_unlock(&dev->struct_mutex);
1193 atomic_dec(&dev->buf_alloc);
1194 return -ENOMEM;
1195 }
1196 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
1197
1198 entry->buf_size = size;
1199 entry->page_order = page_order;
1200
1201 offset = 0;
1202
1203 while (entry->buf_count < count) {
1204 buf = &entry->buflist[entry->buf_count];
1205 buf->idx = dma->buf_count + entry->buf_count;
1206 buf->total = alignment;
1207 buf->order = order;
1208 buf->used = 0;
1209
1210 buf->offset = (dma->byte_count + offset);
1211 buf->bus_address = agp_offset + offset;
1212 buf->address = (void *)(agp_offset + offset);
1213 buf->next = NULL;
1214 buf->waiting = 0;
1215 buf->pending = 0;
1216 init_waitqueue_head(&buf->dma_wait);
1217 buf->file_priv = NULL;
1218
1219 buf->dev_priv_size = dev->driver->dev_priv_size;
1220 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
1221 if (!buf->dev_private) {
1222 /* Set count correctly so we free the proper amount. */
1223 entry->buf_count = count;
1224 drm_cleanup_buf_error(dev, entry);
1225 mutex_unlock(&dev->struct_mutex);
1226 atomic_dec(&dev->buf_alloc);
1227 return -ENOMEM;
1228 }
1229 memset(buf->dev_private, 0, buf->dev_priv_size);
1230
1231 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1232
1233 offset += alignment;
1234 entry->buf_count++;
1235 byte_count += PAGE_SIZE << page_order;
1236 }
1237
1238 DRM_DEBUG("byte_count: %d\n", byte_count);
1239
1240 temp_buflist = drm_realloc(dma->buflist,
1241 dma->buf_count * sizeof(*dma->buflist),
1242 (dma->buf_count + entry->buf_count)
1243 * sizeof(*dma->buflist), DRM_MEM_BUFS);
1244 if (!temp_buflist) {
1245 /* Free the entry because it isn't valid */
1246 drm_cleanup_buf_error(dev, entry);
1247 mutex_unlock(&dev->struct_mutex);
1248 atomic_dec(&dev->buf_alloc);
1249 return -ENOMEM;
1250 }
1251 dma->buflist = temp_buflist;
1252
1253 for (i = 0; i < entry->buf_count; i++) {
1254 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1255 }
1256
1257 dma->buf_count += entry->buf_count;
1258 dma->seg_count += entry->seg_count;
1259 dma->page_count += byte_count >> PAGE_SHIFT;
1260 dma->byte_count += byte_count;
1261
1262 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1263 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1264
1265 mutex_unlock(&dev->struct_mutex);
1266
1267 request->count = entry->buf_count;
1268 request->size = size;
1269
1270 dma->flags = _DRM_DMA_USE_FB;
1271
1272 atomic_dec(&dev->buf_alloc);
1273 return 0;
1274 }
1275
1276
1277 /**
1278 * Add buffers for DMA transfers (ioctl).
1279 *
1280 * \param inode device inode.
1281 * \param file_priv DRM file private.
1282 * \param cmd command.
1283 * \param arg pointer to a struct drm_buf_desc request.
1284 * \return zero on success or a negative number on failure.
1285 *
1286 * According to the memory type specified in drm_buf_desc::flags and the
1287 * build options, it dispatches the call either to addbufs_agp(),
1288 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
1289 * PCI memory respectively.
1290 */
1291 int drm_addbufs(struct drm_device *dev, void *data,
1292 struct drm_file *file_priv)
1293 {
1294 struct drm_buf_desc *request = data;
1295 int ret;
1296
1297 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1298 return -EINVAL;
1299
1300 #if __OS_HAS_AGP
1301 if (request->flags & _DRM_AGP_BUFFER)
1302 ret = drm_addbufs_agp(dev, request);
1303 else
1304 #endif
1305 if (request->flags & _DRM_SG_BUFFER)
1306 ret = drm_addbufs_sg(dev, request);
1307 else if (request->flags & _DRM_FB_BUFFER)
1308 ret = drm_addbufs_fb(dev, request);
1309 else
1310 ret = drm_addbufs_pci(dev, request);
1311
1312 return ret;
1313 }
1314
1315 /**
1316 * Get information about the buffer mappings.
1317 *
1318 * This was originally meant for debugging purposes, or for use by a sophisticated
1319 * client library to determine how best to use the available buffers (e.g.,
1320 * large buffers can be used for image transfer).
1321 *
1322 * \param inode device inode.
1323 * \param file_priv DRM file private.
1324 * \param cmd command.
1325 * \param arg pointer to a drm_buf_info structure.
1326 * \return zero on success or a negative number on failure.
1327 *
1328 * Increments drm_device::buf_use while holding the drm_device::count_lock
1329 * lock, preventing further buffer allocation after this call. Information
1330 * about each requested buffer is then copied into user space.
1331 */
1332 int drm_infobufs(struct drm_device *dev, void *data,
1333 struct drm_file *file_priv)
1334 {
1335 struct drm_device_dma *dma = dev->dma;
1336 struct drm_buf_info *request = data;
1337 int i;
1338 int count;
1339
1340 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1341 return -EINVAL;
1342
1343 if (!dma)
1344 return -EINVAL;
1345
1346 spin_lock(&dev->count_lock);
1347 if (atomic_read(&dev->buf_alloc)) {
1348 spin_unlock(&dev->count_lock);
1349 return -EBUSY;
1350 }
1351 ++dev->buf_use; /* Can't allocate more after this call */
1352 spin_unlock(&dev->count_lock);
1353
1354 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1355 if (dma->bufs[i].buf_count)
1356 ++count;
1357 }
1358
1359 DRM_DEBUG("count = %d\n", count);
1360
1361 if (request->count >= count) {
1362 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1363 if (dma->bufs[i].buf_count) {
1364 struct drm_buf_desc __user *to =
1365 &request->list[count];
1366 struct drm_buf_entry *from = &dma->bufs[i];
1367 struct drm_freelist *list = &dma->bufs[i].freelist;
1368 if (copy_to_user(&to->count,
1369 &from->buf_count,
1370 sizeof(from->buf_count)) ||
1371 copy_to_user(&to->size,
1372 &from->buf_size,
1373 sizeof(from->buf_size)) ||
1374 copy_to_user(&to->low_mark,
1375 &list->low_mark,
1376 sizeof(list->low_mark)) ||
1377 copy_to_user(&to->high_mark,
1378 &list->high_mark,
1379 sizeof(list->high_mark)))
1380 return -EFAULT;
1381
1382 DRM_DEBUG("%d %d %d %d %d\n",
1383 i,
1384 dma->bufs[i].buf_count,
1385 dma->bufs[i].buf_size,
1386 dma->bufs[i].freelist.low_mark,
1387 dma->bufs[i].freelist.high_mark);
1388 ++count;
1389 }
1390 }
1391 }
1392 request->count = count;
1393
1394 return 0;
1395 }
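/* User-space view (sketch, not part of this file): a client sizes its
 * drm_buf_desc array by calling the ioctl twice, as libdrm does; fd is an
 * open DRM device node:
 *
 *	struct drm_buf_info info = { .count = 0, .list = NULL };
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);	// first call: learn the count
 *	info.list = calloc(info.count, sizeof(struct drm_buf_desc));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);	// second call: fill the list
 */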
1396
1397 /**
1398 * Specifies a low and high water mark for buffer allocation
1399 *
1400 * \param inode device inode.
1401 * \param file_priv DRM file private.
1402 * \param cmd command.
1403 * \param arg a pointer to a drm_buf_desc structure.
1404 * \return zero on success or a negative number on failure.
1405 *
1406 * Verifies that the size order is bounded between the admissible orders and
1407 * updates the respective drm_device_dma::bufs entry low and high water mark.
1408 *
1409 * \note This ioctl is deprecated and rarely, if ever, used.
1410 */
1411 int drm_markbufs(struct drm_device *dev, void *data,
1412 struct drm_file *file_priv)
1413 {
1414 struct drm_device_dma *dma = dev->dma;
1415 struct drm_buf_desc *request = data;
1416 int order;
1417 struct drm_buf_entry *entry;
1418
1419 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1420 return -EINVAL;
1421
1422 if (!dma)
1423 return -EINVAL;
1424
1425 DRM_DEBUG("%d, %d, %d\n",
1426 request->size, request->low_mark, request->high_mark);
1427 order = drm_order(request->size);
1428 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1429 return -EINVAL;
1430 entry = &dma->bufs[order];
1431
1432 if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1433 return -EINVAL;
1434 if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1435 return -EINVAL;
1436
1437 entry->freelist.low_mark = request->low_mark;
1438 entry->freelist.high_mark = request->high_mark;
1439
1440 return 0;
1441 }
1442
1443 /**
1444 * Unreserve the buffers in list, previously reserved using drmDMA.
1445 *
1446 * \param inode device inode.
1447 * \param file_priv DRM file private.
1448 * \param cmd command.
1449 * \param arg pointer to a drm_buf_free structure.
1450 * \return zero on success or a negative number on failure.
1451 *
1452 * Calls free_buffer() for each used buffer.
1453 * This function is primarily used for debugging.
1454 */
1455 int drm_freebufs(struct drm_device *dev, void *data,
1456 struct drm_file *file_priv)
1457 {
1458 struct drm_device_dma *dma = dev->dma;
1459 struct drm_buf_free *request = data;
1460 int i;
1461 int idx;
1462 struct drm_buf *buf;
1463
1464 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1465 return -EINVAL;
1466
1467 if (!dma)
1468 return -EINVAL;
1469
1470 DRM_DEBUG("%d\n", request->count);
1471 for (i = 0; i < request->count; i++) {
1472 if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1473 return -EFAULT;
1474 if (idx < 0 || idx >= dma->buf_count) {
1475 DRM_ERROR("Index %d (of %d max)\n",
1476 idx, dma->buf_count - 1);
1477 return -EINVAL;
1478 }
1479 buf = dma->buflist[idx];
1480 if (buf->file_priv != file_priv) {
1481 DRM_ERROR("Process %d freeing buffer not owned\n",
1482 task_pid_nr(current));
1483 return -EINVAL;
1484 }
1485 drm_free_buffer(dev, buf);
1486 }
1487
1488 return 0;
1489 }
1490
1491 /**
1492 * Maps all of the DMA buffers into client-virtual space (ioctl).
1493 *
1494 * \param inode device inode.
1495 * \param file_priv DRM file private.
1496 * \param cmd command.
1497 * \param arg pointer to a drm_buf_map structure.
1498 * \return zero on success or a negative number on failure.
1499 *
1500 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
1501 * about each buffer into user space. For PCI buffers, it calls do_mmap() with
1502 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1503 * drm_mmap_dma().
1504 */
1505 int drm_mapbufs(struct drm_device *dev, void *data,
1506 struct drm_file *file_priv)
1507 {
1508 struct drm_device_dma *dma = dev->dma;
1509 int retcode = 0;
1510 const int zero = 0;
1511 unsigned long virtual;
1512 unsigned long address;
1513 struct drm_buf_map *request = data;
1514 int i;
1515
1516 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1517 return -EINVAL;
1518
1519 if (!dma)
1520 return -EINVAL;
1521
1522 spin_lock(&dev->count_lock);
1523 if (atomic_read(&dev->buf_alloc)) {
1524 spin_unlock(&dev->count_lock);
1525 return -EBUSY;
1526 }
1527 dev->buf_use++; /* Can't allocate more after this call */
1528 spin_unlock(&dev->count_lock);
1529
1530 if (request->count >= dma->buf_count) {
1531 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1532 || (drm_core_check_feature(dev, DRIVER_SG)
1533 && (dma->flags & _DRM_DMA_USE_SG))
1534 || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1535 && (dma->flags & _DRM_DMA_USE_FB))) {
1536 struct drm_map *map = dev->agp_buffer_map;
1537 unsigned long token = dev->agp_buffer_token;
1538
1539 if (!map) {
1540 retcode = -EINVAL;
1541 goto done;
1542 }
1543 down_write(&current->mm->mmap_sem);
1544 virtual = do_mmap(file_priv->filp, 0, map->size,
1545 PROT_READ | PROT_WRITE,
1546 MAP_SHARED,
1547 token);
1548 up_write(&current->mm->mmap_sem);
1549 } else {
1550 down_write(&current->mm->mmap_sem);
1551 virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
1552 PROT_READ | PROT_WRITE,
1553 MAP_SHARED, 0);
1554 up_write(&current->mm->mmap_sem);
1555 }
1556 if (virtual > -1024UL) {
1557 /* Real error */
1558 retcode = (signed long)virtual;
1559 goto done;
1560 }
1561 request->virtual = (void __user *)virtual;
1562
1563 for (i = 0; i < dma->buf_count; i++) {
1564 if (copy_to_user(&request->list[i].idx,
1565 &dma->buflist[i]->idx,
1566 sizeof(request->list[0].idx))) {
1567 retcode = -EFAULT;
1568 goto done;
1569 }
1570 if (copy_to_user(&request->list[i].total,
1571 &dma->buflist[i]->total,
1572 sizeof(request->list[0].total))) {
1573 retcode = -EFAULT;
1574 goto done;
1575 }
1576 if (copy_to_user(&request->list[i].used,
1577 &zero, sizeof(zero))) {
1578 retcode = -EFAULT;
1579 goto done;
1580 }
1581 address = virtual + dma->buflist[i]->offset; /* *** */
1582 if (copy_to_user(&request->list[i].address,
1583 &address, sizeof(address))) {
1584 retcode = -EFAULT;
1585 goto done;
1586 }
1587 }
1588 }
1589 done:
1590 request->count = dma->buf_count;
1591 DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1592
1593 return retcode;
1594 }
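/* User-space view (sketch, not part of this file): libdrm's drmMapBufs()
 * drives this ioctl roughly as follows, where "expected" is the total buffer
 * count previously reported by the kernel and "bufs" is a user-allocated
 * array:
 *
 *	struct drm_buf_map request = {
 *		.count = expected,
 *		.list  = bufs,		// struct drm_buf_pub[expected]
 *	};
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &request) == 0)
 *		// bufs[i].address now points into the region mmap()ed above
 */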
1595
1596 /**
1597 * Compute size order. Returns the exponent of the smallest power of two
1598 * that is greater than or equal to the given number.
1599 *
1600 * \param size size.
1601 * \return order.
1602 *
1603 * \todo Can be made faster.
1604 */
1605 int drm_order(unsigned long size)
1606 {
1607 int order;
1608 unsigned long tmp;
1609
1610 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1611
1612 if (size & (size - 1))
1613 ++order;
1614
1615 return order;
1616 }
1617 EXPORT_SYMBOL(drm_order);
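/* Worked examples: drm_order(1) == 0, drm_order(4096) == 12,
 * drm_order(4097) == 13, drm_order(65536) == 16 -- i.e. the exponent of the
 * smallest power of two that is >= size.
 */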