Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /** |
2 | * \file drm_bufs.c | |
3 | * Generic buffer template | |
4 | * | |
5 | * \author Rickard E. (Rik) Faith <faith@valinux.com> | |
6 | * \author Gareth Hughes <gareth@valinux.com> | |
7 | */ | |
8 | ||
9 | /* | |
10 | * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com | |
11 | * | |
12 | * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. | |
13 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | |
14 | * All Rights Reserved. | |
15 | * | |
16 | * Permission is hereby granted, free of charge, to any person obtaining a | |
17 | * copy of this software and associated documentation files (the "Software"), | |
18 | * to deal in the Software without restriction, including without limitation | |
19 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
20 | * and/or sell copies of the Software, and to permit persons to whom the | |
21 | * Software is furnished to do so, subject to the following conditions: | |
22 | * | |
23 | * The above copyright notice and this permission notice (including the next | |
24 | * paragraph) shall be included in all copies or substantial portions of the | |
25 | * Software. | |
26 | * | |
27 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
28 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
29 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
30 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
31 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
32 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
33 | * OTHER DEALINGS IN THE SOFTWARE. | |
34 | */ | |
35 | ||
36 | #include <linux/vmalloc.h> | |
37 | #include "drmP.h" | |
38 | ||
39 | /** | |
40 | * Compute size order. Returns the exponent of the smallest power of two which |
41 | * is greater than or equal to the given number. |
42 | * | |
43 | * \param size size. | |
44 | * \return order. | |
45 | * | |
46 | * \todo Can be made faster. | |
47 | */ | |
48 | int drm_order( unsigned long size ) | |
49 | { | |
50 | int order; | |
51 | unsigned long tmp; | |
52 | ||
53 | for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) | |
54 | ; | |
55 | ||
56 | if (size & (size - 1)) | |
57 | ++order; | |
58 | ||
59 | return order; | |
60 | } | |
61 | EXPORT_SYMBOL(drm_order); | |
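The \todo above notes that drm_order() can be made faster. A minimal sketch of that idea follows; it is an illustration only (not part of the original file) and assumes fls_long() from <linux/bitops.h>, which later kernels provide. The helper name drm_order_fast is hypothetical.

#include <linux/bitops.h>

/* ceil(log2(size)) via the highest set bit of (size - 1); agrees with
 * drm_order() for every input, including the degenerate size == 0 case. */
static inline int drm_order_fast(unsigned long size)
{
	if (size <= 1)
		return 0;
	return fls_long(size - 1);
}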
62 | ||
9a186645 DA |
63 | #ifdef CONFIG_COMPAT |
64 | /* | |
65 | * Used to allocate 32-bit handles for _DRM_SHM regions | |
66 | * The 0x10000000 value is chosen to be out of the way of | |
67 | * FB/register and GART physical addresses. | |
68 | */ | |
69 | static unsigned int map32_handle = 0x10000000; | |
70 | #endif | |
71 | ||
1da177e4 LT |
72 | /** |
73 | * Ioctl to specify a range of memory that is available for mapping by a non-root process. | |
74 | * | |
75 | * \param inode device inode. | |
76 | * \param filp file pointer. | |
77 | * \param cmd command. | |
78 | * \param arg pointer to a drm_map structure. | |
79 | * \return zero on success or a negative value on error. | |
80 | * | |
81 | * Adjusts the memory offset to its absolute value according to the mapping | |
82 | * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where |
83 | * applicable and if supported by the kernel. | |
84 | */ | |
85 | int drm_addmap( struct inode *inode, struct file *filp, | |
86 | unsigned int cmd, unsigned long arg ) | |
87 | { | |
88 | drm_file_t *priv = filp->private_data; | |
89 | drm_device_t *dev = priv->head->dev; | |
90 | drm_map_t *map; | |
91 | drm_map_t __user *argp = (void __user *)arg; | |
92 | drm_map_list_t *list; | |
93 | ||
94 | if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */ | |
95 | ||
96 | map = drm_alloc( sizeof(*map), DRM_MEM_MAPS ); | |
97 | if ( !map ) | |
98 | return -ENOMEM; | |
99 | ||
100 | if ( copy_from_user( map, argp, sizeof(*map) ) ) { | |
101 | drm_free( map, sizeof(*map), DRM_MEM_MAPS ); | |
102 | return -EFAULT; | |
103 | } | |
104 | ||
105 | /* Only allow shared memory to be removable since we only keep enough | |
106 | * bookkeeping information about shared memory to allow for removal |
107 | * when processes fork. | |
108 | */ | |
109 | if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) { | |
110 | drm_free( map, sizeof(*map), DRM_MEM_MAPS ); | |
111 | return -EINVAL; | |
112 | } | |
113 | DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n", | |
114 | map->offset, map->size, map->type ); | |
115 | if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) { | |
116 | drm_free( map, sizeof(*map), DRM_MEM_MAPS ); | |
117 | return -EINVAL; | |
118 | } | |
119 | map->mtrr = -1; | |
120 | map->handle = NULL; | |
121 | ||
122 | switch ( map->type ) { | |
123 | case _DRM_REGISTERS: | |
124 | case _DRM_FRAME_BUFFER: | |
125 | #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) | |
126 | if ( map->offset + map->size < map->offset || | |
127 | map->offset < virt_to_phys(high_memory) ) { | |
128 | drm_free( map, sizeof(*map), DRM_MEM_MAPS ); | |
129 | return -EINVAL; | |
130 | } | |
131 | #endif | |
132 | #ifdef __alpha__ | |
133 | map->offset += dev->hose->mem_space->start; | |
134 | #endif | |
135 | if (drm_core_has_MTRR(dev)) { | |
136 | if ( map->type == _DRM_FRAME_BUFFER || | |
137 | (map->flags & _DRM_WRITE_COMBINING) ) { | |
138 | map->mtrr = mtrr_add( map->offset, map->size, | |
139 | MTRR_TYPE_WRCOMB, 1 ); | |
140 | } | |
141 | } | |
142 | if (map->type == _DRM_REGISTERS) | |
143 | map->handle = drm_ioremap( map->offset, map->size, | |
144 | dev ); | |
145 | break; | |
146 | ||
147 | case _DRM_SHM: | |
148 | map->handle = vmalloc_32(map->size); | |
149 | DRM_DEBUG( "%lu %d %p\n", | |
150 | map->size, drm_order( map->size ), map->handle ); | |
151 | if ( !map->handle ) { | |
152 | drm_free( map, sizeof(*map), DRM_MEM_MAPS ); | |
153 | return -ENOMEM; | |
154 | } | |
155 | map->offset = (unsigned long)map->handle; | |
156 | if ( map->flags & _DRM_CONTAINS_LOCK ) { | |
157 | /* Prevent a 2nd X Server from creating a 2nd lock */ | |
158 | if (dev->lock.hw_lock != NULL) { | |
159 | vfree( map->handle ); | |
160 | drm_free( map, sizeof(*map), DRM_MEM_MAPS ); | |
161 | return -EBUSY; | |
162 | } | |
163 | dev->sigdata.lock = | |
164 | dev->lock.hw_lock = map->handle; /* Pointer to lock */ | |
165 | } | |
166 | break; | |
167 | case _DRM_AGP: | |
168 | if (drm_core_has_AGP(dev)) { | |
169 | #ifdef __alpha__ | |
170 | map->offset += dev->hose->mem_space->start; | |
171 | #endif | |
172 | map->offset += dev->agp->base; | |
173 | map->mtrr = dev->agp->agp_mtrr; /* for getmap */ | |
174 | } | |
175 | break; | |
176 | case _DRM_SCATTER_GATHER: | |
177 | if (!dev->sg) { | |
178 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | |
179 | return -EINVAL; | |
180 | } | |
181 | map->offset += dev->sg->handle; | |
182 | break; | |
2d0f9eaf DA |
183 | case _DRM_CONSISTENT: |
184 | { | |
185 | /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G. |
186 | * As we limit the address to 2^32-1 (or less), |
187 | * casting it down to 32 bits is no problem, but we |
188 | * need to point to a 64-bit variable first. */ |
189 | dma_addr_t bus_addr; | |
190 | map->handle = drm_pci_alloc(dev, map->size, map->size, | |
191 | 0xffffffffUL, &bus_addr); | |
192 | map->offset = (unsigned long)bus_addr; | |
193 | if (!map->handle) { | |
194 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | |
195 | return -ENOMEM; | |
196 | } | |
197 | break; | |
198 | } | |
1da177e4 LT |
199 | default: |
200 | drm_free( map, sizeof(*map), DRM_MEM_MAPS ); | |
201 | return -EINVAL; | |
202 | } | |
203 | ||
204 | list = drm_alloc(sizeof(*list), DRM_MEM_MAPS); | |
205 | if(!list) { | |
206 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | |
207 | return -EINVAL; | |
208 | } | |
209 | memset(list, 0, sizeof(*list)); | |
210 | list->map = map; | |
211 | ||
212 | down(&dev->struct_sem); | |
213 | list_add(&list->head, &dev->maplist->head); | |
9a186645 DA |
214 | #ifdef CONFIG_COMPAT |
215 | /* Assign a 32-bit handle for _DRM_SHM mappings */ | |
216 | /* We do it here so that dev->struct_sem protects the increment */ | |
217 | if (map->type == _DRM_SHM) | |
218 | map->offset = map32_handle += PAGE_SIZE; | |
219 | #endif | |
1da177e4 LT |
220 | up(&dev->struct_sem); |
221 | ||
222 | if ( copy_to_user( argp, map, sizeof(*map) ) ) | |
223 | return -EFAULT; | |
9a186645 DA |
224 | if (copy_to_user(&argp->handle, &map->offset, sizeof(map->offset))) |
225 | return -EFAULT; | |
1da177e4 LT |
226 | return 0; |
227 | } | |
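For context, a hedged sketch of how a userspace client might reach this handler through the corresponding ioctl. It assumes the drm_map_t layout and DRM_IOCTL_ADD_MAP definition from the shared drm.h header; the helper name and the MMIO range passed in are illustrative, and the include path for drm.h varies by installation.

#include <errno.h>
#include <sys/ioctl.h>
#include "drm.h"	/* shared DRM header; install path varies */

/* Ask the kernel to register a page-aligned MMIO range as _DRM_REGISTERS. */
static int request_register_map(int fd, unsigned long base, unsigned long size)
{
	drm_map_t map = {
		.offset = base,		/* physical base, page aligned */
		.size   = size,		/* length, page aligned */
		.type   = _DRM_REGISTERS,
		.flags  = _DRM_READ_ONLY,
	};

	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) < 0)
		return -errno;	/* e.g. an unaligned offset/size is rejected above */
	/* On success, map.handle is the token to pass to mmap() on the same fd. */
	return 0;
}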
228 | ||
229 | ||
230 | /** | |
231 | * Remove a map private from list and deallocate resources if the mapping | |
232 | * isn't in use. | |
233 | * | |
234 | * \param inode device inode. | |
235 | * \param filp file pointer. | |
236 | * \param cmd command. | |
237 | * \param arg pointer to a drm_map_t structure. | |
238 | * \return zero on success or a negative value on error. | |
239 | * | |
240 | * Searches for the map on drm_device::maplist, removes it from the list, checks |
241 | * whether it is still being used, and frees any associated resources (such as |
242 | * MTRRs) if it is not. |
243 | * | |
244 | * \sa drm_addmap(). |
245 | */ | |
246 | int drm_rmmap(struct inode *inode, struct file *filp, | |
247 | unsigned int cmd, unsigned long arg) | |
248 | { | |
249 | drm_file_t *priv = filp->private_data; | |
250 | drm_device_t *dev = priv->head->dev; | |
251 | struct list_head *list; | |
252 | drm_map_list_t *r_list = NULL; | |
253 | drm_vma_entry_t *pt, *prev; | |
254 | drm_map_t *map; | |
255 | drm_map_t request; | |
256 | int found_maps = 0; | |
257 | ||
258 | if (copy_from_user(&request, (drm_map_t __user *)arg, | |
259 | sizeof(request))) { | |
260 | return -EFAULT; | |
261 | } | |
262 | ||
263 | down(&dev->struct_sem); | |
264 | list = &dev->maplist->head; | |
265 | list_for_each(list, &dev->maplist->head) { | |
266 | r_list = list_entry(list, drm_map_list_t, head); | |
267 | ||
268 | if(r_list->map && | |
9a186645 | 269 | r_list->map->offset == (unsigned long) request.handle && |
1da177e4 LT |
270 | r_list->map->flags & _DRM_REMOVABLE) break; |
271 | } | |
272 | ||
273 | /* List has wrapped around to the head pointer, or it's empty and we didn't |
274 | * find anything. | |
275 | */ | |
276 | if(list == (&dev->maplist->head)) { | |
277 | up(&dev->struct_sem); | |
278 | return -EINVAL; | |
279 | } | |
280 | map = r_list->map; | |
281 | list_del(list); | |
282 | drm_free(list, sizeof(*list), DRM_MEM_MAPS); | |
283 | ||
284 | for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) { | |
285 | if (pt->vma->vm_private_data == map) found_maps++; | |
286 | } | |
287 | ||
288 | if(!found_maps) { | |
289 | switch (map->type) { | |
290 | case _DRM_REGISTERS: | |
291 | case _DRM_FRAME_BUFFER: | |
292 | if (drm_core_has_MTRR(dev)) { | |
293 | if (map->mtrr >= 0) { | |
294 | int retcode; | |
295 | retcode = mtrr_del(map->mtrr, | |
296 | map->offset, | |
297 | map->size); | |
298 | DRM_DEBUG("mtrr_del = %d\n", retcode); | |
299 | } | |
300 | } | |
301 | drm_ioremapfree(map->handle, map->size, dev); | |
302 | break; | |
303 | case _DRM_SHM: | |
304 | vfree(map->handle); | |
305 | break; | |
306 | case _DRM_AGP: | |
307 | case _DRM_SCATTER_GATHER: | |
308 | break; | |
2d0f9eaf DA |
309 | case _DRM_CONSISTENT: |
310 | drm_pci_free(dev, map->size, map->handle, map->offset); | |
311 | break; | |
1da177e4 LT |
312 | } |
313 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | |
314 | } | |
315 | up(&dev->struct_sem); | |
316 | return 0; | |
317 | } | |
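A matching sketch for tearing such a mapping down again (same headers and assumptions as the drm_addmap() sketch above). drm_rmmap() only consults the handle and the _DRM_REMOVABLE flag recorded at creation time, so the remaining fields can stay zeroed; release_map is a hypothetical helper name.

static int release_map(int fd, void *handle)
{
	drm_map_t map = { .handle = handle };	/* handle returned by DRM_IOCTL_ADD_MAP */

	if (ioctl(fd, DRM_IOCTL_RM_MAP, &map) < 0)
		return -errno;	/* no removable map with that handle */
	return 0;
}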
318 | ||
319 | /** | |
320 | * Cleanup after an error on one of the addbufs() functions. | |
321 | * | |
322 | * \param entry buffer entry where the error occurred. | |
323 | * | |
324 | * Frees any pages and buffers associated with the given entry. | |
325 | */ | |
326 | static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry) | |
327 | { | |
328 | int i; | |
329 | ||
330 | if (entry->seg_count) { | |
331 | for (i = 0; i < entry->seg_count; i++) { | |
332 | if (entry->seglist[i]) { | |
333 | drm_free_pages(entry->seglist[i], | |
334 | entry->page_order, | |
335 | DRM_MEM_DMA); | |
336 | } | |
337 | } | |
338 | drm_free(entry->seglist, | |
339 | entry->seg_count * | |
340 | sizeof(*entry->seglist), | |
341 | DRM_MEM_SEGS); | |
342 | ||
343 | entry->seg_count = 0; | |
344 | } | |
345 | ||
346 | if (entry->buf_count) { | |
347 | for (i = 0; i < entry->buf_count; i++) { | |
348 | if (entry->buflist[i].dev_private) { | |
349 | drm_free(entry->buflist[i].dev_private, | |
350 | entry->buflist[i].dev_priv_size, | |
351 | DRM_MEM_BUFS); | |
352 | } | |
353 | } | |
354 | drm_free(entry->buflist, | |
355 | entry->buf_count * | |
356 | sizeof(*entry->buflist), | |
357 | DRM_MEM_BUFS); | |
358 | ||
359 | entry->buf_count = 0; | |
360 | } | |
361 | } | |
362 | ||
363 | #if __OS_HAS_AGP | |
364 | /** | |
365 | * Add AGP buffers for DMA transfers (ioctl). | |
366 | * | |
367 | * \param inode device inode. | |
368 | * \param filp file pointer. | |
369 | * \param cmd command. | |
370 | * \param arg pointer to a drm_buf_desc_t request. | |
371 | * \return zero on success or a negative number on failure. | |
372 | * | |
373 | * After some sanity checks creates a drm_buf structure for each buffer and | |
374 | * reallocates the buffer list of the same size order to accommodate the new | |
375 | * buffers. | |
376 | */ | |
c94f7029 DA |
377 | static int drm_addbufs_agp( struct inode *inode, struct file *filp, |
378 | unsigned int cmd, unsigned long arg ) | |
1da177e4 LT |
379 | { |
380 | drm_file_t *priv = filp->private_data; | |
381 | drm_device_t *dev = priv->head->dev; | |
382 | drm_device_dma_t *dma = dev->dma; | |
383 | drm_buf_desc_t request; | |
384 | drm_buf_entry_t *entry; | |
385 | drm_buf_t *buf; | |
386 | unsigned long offset; | |
387 | unsigned long agp_offset; | |
388 | int count; | |
389 | int order; | |
390 | int size; | |
391 | int alignment; | |
392 | int page_order; | |
393 | int total; | |
394 | int byte_count; | |
395 | int i; | |
396 | drm_buf_t **temp_buflist; | |
397 | drm_buf_desc_t __user *argp = (void __user *)arg; | |
398 | ||
399 | if ( !dma ) return -EINVAL; | |
400 | ||
401 | if ( copy_from_user( &request, argp, | |
402 | sizeof(request) ) ) | |
403 | return -EFAULT; | |
404 | ||
405 | count = request.count; | |
406 | order = drm_order( request.size ); | |
407 | size = 1 << order; | |
408 | ||
409 | alignment = (request.flags & _DRM_PAGE_ALIGN) | |
410 | ? PAGE_ALIGN(size) : size; | |
411 | page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; | |
412 | total = PAGE_SIZE << page_order; | |
413 | ||
414 | byte_count = 0; | |
415 | agp_offset = dev->agp->base + request.agp_start; | |
416 | ||
417 | DRM_DEBUG( "count: %d\n", count ); | |
418 | DRM_DEBUG( "order: %d\n", order ); | |
419 | DRM_DEBUG( "size: %d\n", size ); | |
420 | DRM_DEBUG( "agp_offset: %lu\n", agp_offset ); | |
421 | DRM_DEBUG( "alignment: %d\n", alignment ); | |
422 | DRM_DEBUG( "page_order: %d\n", page_order ); | |
423 | DRM_DEBUG( "total: %d\n", total ); | |
424 | ||
425 | if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL; | |
426 | if ( dev->queue_count ) return -EBUSY; /* Not while in use */ | |
427 | ||
428 | spin_lock( &dev->count_lock ); | |
429 | if ( dev->buf_use ) { | |
430 | spin_unlock( &dev->count_lock ); | |
431 | return -EBUSY; | |
432 | } | |
433 | atomic_inc( &dev->buf_alloc ); | |
434 | spin_unlock( &dev->count_lock ); | |
435 | ||
436 | down( &dev->struct_sem ); | |
437 | entry = &dma->bufs[order]; | |
438 | if ( entry->buf_count ) { | |
439 | up( &dev->struct_sem ); | |
440 | atomic_dec( &dev->buf_alloc ); | |
441 | return -ENOMEM; /* May only call once for each order */ | |
442 | } | |
443 | ||
444 | if (count < 0 || count > 4096) { | |
445 | up( &dev->struct_sem ); | |
446 | atomic_dec( &dev->buf_alloc ); | |
447 | return -EINVAL; | |
448 | } | |
449 | ||
450 | entry->buflist = drm_alloc( count * sizeof(*entry->buflist), | |
451 | DRM_MEM_BUFS ); | |
452 | if ( !entry->buflist ) { | |
453 | up( &dev->struct_sem ); | |
454 | atomic_dec( &dev->buf_alloc ); | |
455 | return -ENOMEM; | |
456 | } | |
457 | memset( entry->buflist, 0, count * sizeof(*entry->buflist) ); | |
458 | ||
459 | entry->buf_size = size; | |
460 | entry->page_order = page_order; | |
461 | ||
462 | offset = 0; | |
463 | ||
464 | while ( entry->buf_count < count ) { | |
465 | buf = &entry->buflist[entry->buf_count]; | |
466 | buf->idx = dma->buf_count + entry->buf_count; | |
467 | buf->total = alignment; | |
468 | buf->order = order; | |
469 | buf->used = 0; | |
470 | ||
471 | buf->offset = (dma->byte_count + offset); | |
472 | buf->bus_address = agp_offset + offset; | |
473 | buf->address = (void *)(agp_offset + offset); | |
474 | buf->next = NULL; | |
475 | buf->waiting = 0; | |
476 | buf->pending = 0; | |
477 | init_waitqueue_head( &buf->dma_wait ); | |
478 | buf->filp = NULL; | |
479 | ||
480 | buf->dev_priv_size = dev->driver->dev_priv_size; | |
481 | buf->dev_private = drm_alloc( buf->dev_priv_size, | |
482 | DRM_MEM_BUFS ); | |
483 | if(!buf->dev_private) { | |
484 | /* Set count correctly so we free the proper amount. */ | |
485 | entry->buf_count = count; | |
486 | drm_cleanup_buf_error(dev,entry); | |
487 | up( &dev->struct_sem ); | |
488 | atomic_dec( &dev->buf_alloc ); | |
489 | return -ENOMEM; | |
490 | } | |
491 | memset( buf->dev_private, 0, buf->dev_priv_size ); | |
492 | ||
493 | DRM_DEBUG( "buffer %d @ %p\n", | |
494 | entry->buf_count, buf->address ); | |
495 | ||
496 | offset += alignment; | |
497 | entry->buf_count++; | |
498 | byte_count += PAGE_SIZE << page_order; | |
499 | } | |
500 | ||
501 | DRM_DEBUG( "byte_count: %d\n", byte_count ); | |
502 | ||
503 | temp_buflist = drm_realloc( dma->buflist, | |
504 | dma->buf_count * sizeof(*dma->buflist), | |
505 | (dma->buf_count + entry->buf_count) | |
506 | * sizeof(*dma->buflist), | |
507 | DRM_MEM_BUFS ); | |
508 | if(!temp_buflist) { | |
509 | /* Free the entry because it isn't valid */ | |
510 | drm_cleanup_buf_error(dev,entry); | |
511 | up( &dev->struct_sem ); | |
512 | atomic_dec( &dev->buf_alloc ); | |
513 | return -ENOMEM; | |
514 | } | |
515 | dma->buflist = temp_buflist; | |
516 | ||
517 | for ( i = 0 ; i < entry->buf_count ; i++ ) { | |
518 | dma->buflist[i + dma->buf_count] = &entry->buflist[i]; | |
519 | } | |
520 | ||
521 | dma->buf_count += entry->buf_count; | |
522 | dma->byte_count += byte_count; | |
523 | ||
524 | DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count ); | |
525 | DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count ); | |
526 | ||
527 | up( &dev->struct_sem ); | |
528 | ||
529 | request.count = entry->buf_count; | |
530 | request.size = size; | |
531 | ||
532 | if ( copy_to_user( argp, &request, sizeof(request) ) ) | |
533 | return -EFAULT; | |
534 | ||
535 | dma->flags = _DRM_DMA_USE_AGP; | |
536 | ||
537 | atomic_dec( &dev->buf_alloc ); | |
538 | return 0; | |
539 | } | |
540 | #endif /* __OS_HAS_AGP */ | |
541 | ||
c94f7029 DA |
542 | static int drm_addbufs_pci( struct inode *inode, struct file *filp, |
543 | unsigned int cmd, unsigned long arg ) | |
1da177e4 LT |
544 | { |
545 | drm_file_t *priv = filp->private_data; | |
546 | drm_device_t *dev = priv->head->dev; | |
547 | drm_device_dma_t *dma = dev->dma; | |
548 | drm_buf_desc_t request; | |
549 | int count; | |
550 | int order; | |
551 | int size; | |
552 | int total; | |
553 | int page_order; | |
554 | drm_buf_entry_t *entry; | |
555 | unsigned long page; | |
556 | drm_buf_t *buf; | |
557 | int alignment; | |
558 | unsigned long offset; | |
559 | int i; | |
560 | int byte_count; | |
561 | int page_count; | |
562 | unsigned long *temp_pagelist; | |
563 | drm_buf_t **temp_buflist; | |
564 | drm_buf_desc_t __user *argp = (void __user *)arg; | |
565 | ||
566 | if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL; | |
567 | if ( !dma ) return -EINVAL; | |
568 | ||
569 | if ( copy_from_user( &request, argp, sizeof(request) ) ) | |
570 | return -EFAULT; | |
571 | ||
572 | count = request.count; | |
573 | order = drm_order( request.size ); | |
574 | size = 1 << order; | |
575 | ||
576 | DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n", | |
577 | request.count, request.size, size, | |
578 | order, dev->queue_count ); | |
579 | ||
580 | if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL; | |
581 | if ( dev->queue_count ) return -EBUSY; /* Not while in use */ | |
582 | ||
583 | alignment = (request.flags & _DRM_PAGE_ALIGN) | |
584 | ? PAGE_ALIGN(size) : size; | |
585 | page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; | |
586 | total = PAGE_SIZE << page_order; | |
587 | ||
588 | spin_lock( &dev->count_lock ); | |
589 | if ( dev->buf_use ) { | |
590 | spin_unlock( &dev->count_lock ); | |
591 | return -EBUSY; | |
592 | } | |
593 | atomic_inc( &dev->buf_alloc ); | |
594 | spin_unlock( &dev->count_lock ); | |
595 | ||
596 | down( &dev->struct_sem ); | |
597 | entry = &dma->bufs[order]; | |
598 | if ( entry->buf_count ) { | |
599 | up( &dev->struct_sem ); | |
600 | atomic_dec( &dev->buf_alloc ); | |
601 | return -ENOMEM; /* May only call once for each order */ | |
602 | } | |
603 | ||
604 | if (count < 0 || count > 4096) { | |
605 | up( &dev->struct_sem ); | |
606 | atomic_dec( &dev->buf_alloc ); | |
607 | return -EINVAL; | |
608 | } | |
609 | ||
610 | entry->buflist = drm_alloc( count * sizeof(*entry->buflist), | |
611 | DRM_MEM_BUFS ); | |
612 | if ( !entry->buflist ) { | |
613 | up( &dev->struct_sem ); | |
614 | atomic_dec( &dev->buf_alloc ); | |
615 | return -ENOMEM; | |
616 | } | |
617 | memset( entry->buflist, 0, count * sizeof(*entry->buflist) ); | |
618 | ||
619 | entry->seglist = drm_alloc( count * sizeof(*entry->seglist), | |
620 | DRM_MEM_SEGS ); | |
621 | if ( !entry->seglist ) { | |
622 | drm_free( entry->buflist, | |
623 | count * sizeof(*entry->buflist), | |
624 | DRM_MEM_BUFS ); | |
625 | up( &dev->struct_sem ); | |
626 | atomic_dec( &dev->buf_alloc ); | |
627 | return -ENOMEM; | |
628 | } | |
629 | memset( entry->seglist, 0, count * sizeof(*entry->seglist) ); | |
630 | ||
631 | /* Keep the original pagelist until we know all the allocations | |
632 | * have succeeded | |
633 | */ | |
634 | temp_pagelist = drm_alloc( (dma->page_count + (count << page_order)) | |
635 | * sizeof(*dma->pagelist), | |
636 | DRM_MEM_PAGES ); | |
637 | if (!temp_pagelist) { | |
638 | drm_free( entry->buflist, | |
639 | count * sizeof(*entry->buflist), | |
640 | DRM_MEM_BUFS ); | |
641 | drm_free( entry->seglist, | |
642 | count * sizeof(*entry->seglist), | |
643 | DRM_MEM_SEGS ); | |
644 | up( &dev->struct_sem ); | |
645 | atomic_dec( &dev->buf_alloc ); | |
646 | return -ENOMEM; | |
647 | } | |
648 | memcpy(temp_pagelist, | |
649 | dma->pagelist, | |
650 | dma->page_count * sizeof(*dma->pagelist)); | |
651 | DRM_DEBUG( "pagelist: %d entries\n", | |
652 | dma->page_count + (count << page_order) ); | |
653 | ||
654 | entry->buf_size = size; | |
655 | entry->page_order = page_order; | |
656 | byte_count = 0; | |
657 | page_count = 0; | |
658 | ||
659 | while ( entry->buf_count < count ) { | |
660 | page = drm_alloc_pages( page_order, DRM_MEM_DMA ); | |
661 | if ( !page ) { | |
662 | /* Set count correctly so we free the proper amount. */ | |
663 | entry->buf_count = count; | |
664 | entry->seg_count = count; | |
665 | drm_cleanup_buf_error(dev, entry); | |
666 | drm_free( temp_pagelist, | |
667 | (dma->page_count + (count << page_order)) | |
668 | * sizeof(*dma->pagelist), | |
669 | DRM_MEM_PAGES ); | |
670 | up( &dev->struct_sem ); | |
671 | atomic_dec( &dev->buf_alloc ); | |
672 | return -ENOMEM; | |
673 | } | |
674 | entry->seglist[entry->seg_count++] = page; | |
675 | for ( i = 0 ; i < (1 << page_order) ; i++ ) { | |
676 | DRM_DEBUG( "page %d @ 0x%08lx\n", | |
677 | dma->page_count + page_count, | |
678 | page + PAGE_SIZE * i ); | |
679 | temp_pagelist[dma->page_count + page_count++] | |
680 | = page + PAGE_SIZE * i; | |
681 | } | |
682 | for ( offset = 0 ; | |
683 | offset + size <= total && entry->buf_count < count ; | |
684 | offset += alignment, ++entry->buf_count ) { | |
685 | buf = &entry->buflist[entry->buf_count]; | |
686 | buf->idx = dma->buf_count + entry->buf_count; | |
687 | buf->total = alignment; | |
688 | buf->order = order; | |
689 | buf->used = 0; | |
690 | buf->offset = (dma->byte_count + byte_count + offset); | |
691 | buf->address = (void *)(page + offset); | |
692 | buf->next = NULL; | |
693 | buf->waiting = 0; | |
694 | buf->pending = 0; | |
695 | init_waitqueue_head( &buf->dma_wait ); | |
696 | buf->filp = NULL; | |
697 | ||
698 | buf->dev_priv_size = dev->driver->dev_priv_size; | |
699 | buf->dev_private = drm_alloc( buf->dev_priv_size, | |
700 | DRM_MEM_BUFS ); | |
701 | if(!buf->dev_private) { | |
702 | /* Set count correctly so we free the proper amount. */ | |
703 | entry->buf_count = count; | |
704 | entry->seg_count = count; | |
705 | drm_cleanup_buf_error(dev,entry); | |
706 | drm_free( temp_pagelist, | |
707 | (dma->page_count + (count << page_order)) | |
708 | * sizeof(*dma->pagelist), | |
709 | DRM_MEM_PAGES ); | |
710 | up( &dev->struct_sem ); | |
711 | atomic_dec( &dev->buf_alloc ); | |
712 | return -ENOMEM; | |
713 | } | |
714 | memset( buf->dev_private, 0, buf->dev_priv_size ); | |
715 | ||
716 | DRM_DEBUG( "buffer %d @ %p\n", | |
717 | entry->buf_count, buf->address ); | |
718 | } | |
719 | byte_count += PAGE_SIZE << page_order; | |
720 | } | |
721 | ||
722 | temp_buflist = drm_realloc( dma->buflist, | |
723 | dma->buf_count * sizeof(*dma->buflist), | |
724 | (dma->buf_count + entry->buf_count) | |
725 | * sizeof(*dma->buflist), | |
726 | DRM_MEM_BUFS ); | |
727 | if (!temp_buflist) { | |
728 | /* Free the entry because it isn't valid */ | |
729 | drm_cleanup_buf_error(dev,entry); | |
730 | drm_free( temp_pagelist, | |
731 | (dma->page_count + (count << page_order)) | |
732 | * sizeof(*dma->pagelist), | |
733 | DRM_MEM_PAGES ); | |
734 | up( &dev->struct_sem ); | |
735 | atomic_dec( &dev->buf_alloc ); | |
736 | return -ENOMEM; | |
737 | } | |
738 | dma->buflist = temp_buflist; | |
739 | ||
740 | for ( i = 0 ; i < entry->buf_count ; i++ ) { | |
741 | dma->buflist[i + dma->buf_count] = &entry->buflist[i]; | |
742 | } | |
743 | ||
744 | /* No allocations failed, so now we can replace the original pagelist |
745 | * with the new one. | |
746 | */ | |
747 | if (dma->page_count) { | |
748 | drm_free(dma->pagelist, | |
749 | dma->page_count * sizeof(*dma->pagelist), | |
750 | DRM_MEM_PAGES); | |
751 | } | |
752 | dma->pagelist = temp_pagelist; | |
753 | ||
754 | dma->buf_count += entry->buf_count; | |
755 | dma->seg_count += entry->seg_count; | |
756 | dma->page_count += entry->seg_count << page_order; | |
757 | dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); | |
758 | ||
759 | up( &dev->struct_sem ); | |
760 | ||
761 | request.count = entry->buf_count; | |
762 | request.size = size; | |
763 | ||
764 | if ( copy_to_user( argp, &request, sizeof(request) ) ) | |
765 | return -EFAULT; | |
766 | ||
767 | atomic_dec( &dev->buf_alloc ); | |
768 | return 0; | |
769 | ||
770 | } | |
771 | ||
c94f7029 DA |
772 | static int drm_addbufs_sg( struct inode *inode, struct file *filp, |
773 | unsigned int cmd, unsigned long arg ) | |
1da177e4 LT |
774 | { |
775 | drm_file_t *priv = filp->private_data; | |
776 | drm_device_t *dev = priv->head->dev; | |
777 | drm_device_dma_t *dma = dev->dma; | |
778 | drm_buf_desc_t __user *argp = (void __user *)arg; | |
779 | drm_buf_desc_t request; | |
780 | drm_buf_entry_t *entry; | |
781 | drm_buf_t *buf; | |
782 | unsigned long offset; | |
783 | unsigned long agp_offset; | |
784 | int count; | |
785 | int order; | |
786 | int size; | |
787 | int alignment; | |
788 | int page_order; | |
789 | int total; | |
790 | int byte_count; | |
791 | int i; | |
792 | drm_buf_t **temp_buflist; | |
793 | ||
794 | if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; | |
795 | ||
796 | if ( !dma ) return -EINVAL; | |
797 | ||
798 | if ( copy_from_user( &request, argp, sizeof(request) ) ) | |
799 | return -EFAULT; | |
800 | ||
801 | count = request.count; | |
802 | order = drm_order( request.size ); | |
803 | size = 1 << order; | |
804 | ||
805 | alignment = (request.flags & _DRM_PAGE_ALIGN) | |
806 | ? PAGE_ALIGN(size) : size; | |
807 | page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; | |
808 | total = PAGE_SIZE << page_order; | |
809 | ||
810 | byte_count = 0; | |
811 | agp_offset = request.agp_start; | |
812 | ||
813 | DRM_DEBUG( "count: %d\n", count ); | |
814 | DRM_DEBUG( "order: %d\n", order ); | |
815 | DRM_DEBUG( "size: %d\n", size ); | |
816 | DRM_DEBUG( "agp_offset: %lu\n", agp_offset ); | |
817 | DRM_DEBUG( "alignment: %d\n", alignment ); | |
818 | DRM_DEBUG( "page_order: %d\n", page_order ); | |
819 | DRM_DEBUG( "total: %d\n", total ); | |
820 | ||
821 | if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL; | |
822 | if ( dev->queue_count ) return -EBUSY; /* Not while in use */ | |
823 | ||
824 | spin_lock( &dev->count_lock ); | |
825 | if ( dev->buf_use ) { | |
826 | spin_unlock( &dev->count_lock ); | |
827 | return -EBUSY; | |
828 | } | |
829 | atomic_inc( &dev->buf_alloc ); | |
830 | spin_unlock( &dev->count_lock ); | |
831 | ||
832 | down( &dev->struct_sem ); | |
833 | entry = &dma->bufs[order]; | |
834 | if ( entry->buf_count ) { | |
835 | up( &dev->struct_sem ); | |
836 | atomic_dec( &dev->buf_alloc ); | |
837 | return -ENOMEM; /* May only call once for each order */ | |
838 | } | |
839 | ||
840 | if (count < 0 || count > 4096) { | |
841 | up( &dev->struct_sem ); | |
842 | atomic_dec( &dev->buf_alloc ); | |
843 | return -EINVAL; | |
844 | } | |
845 | ||
846 | entry->buflist = drm_alloc( count * sizeof(*entry->buflist), | |
847 | DRM_MEM_BUFS ); | |
848 | if ( !entry->buflist ) { | |
849 | up( &dev->struct_sem ); | |
850 | atomic_dec( &dev->buf_alloc ); | |
851 | return -ENOMEM; | |
852 | } | |
853 | memset( entry->buflist, 0, count * sizeof(*entry->buflist) ); | |
854 | ||
855 | entry->buf_size = size; | |
856 | entry->page_order = page_order; | |
857 | ||
858 | offset = 0; | |
859 | ||
860 | while ( entry->buf_count < count ) { | |
861 | buf = &entry->buflist[entry->buf_count]; | |
862 | buf->idx = dma->buf_count + entry->buf_count; | |
863 | buf->total = alignment; | |
864 | buf->order = order; | |
865 | buf->used = 0; | |
866 | ||
867 | buf->offset = (dma->byte_count + offset); | |
868 | buf->bus_address = agp_offset + offset; | |
869 | buf->address = (void *)(agp_offset + offset + dev->sg->handle); | |
870 | buf->next = NULL; | |
871 | buf->waiting = 0; | |
872 | buf->pending = 0; | |
873 | init_waitqueue_head( &buf->dma_wait ); | |
874 | buf->filp = NULL; | |
875 | ||
876 | buf->dev_priv_size = dev->driver->dev_priv_size; | |
877 | buf->dev_private = drm_alloc( buf->dev_priv_size, | |
878 | DRM_MEM_BUFS ); | |
879 | if(!buf->dev_private) { | |
880 | /* Set count correctly so we free the proper amount. */ | |
881 | entry->buf_count = count; | |
882 | drm_cleanup_buf_error(dev,entry); | |
883 | up( &dev->struct_sem ); | |
884 | atomic_dec( &dev->buf_alloc ); | |
885 | return -ENOMEM; | |
886 | } | |
887 | ||
888 | memset( buf->dev_private, 0, buf->dev_priv_size ); | |
889 | ||
890 | DRM_DEBUG( "buffer %d @ %p\n", | |
891 | entry->buf_count, buf->address ); | |
892 | ||
893 | offset += alignment; | |
894 | entry->buf_count++; | |
895 | byte_count += PAGE_SIZE << page_order; | |
896 | } | |
897 | ||
898 | DRM_DEBUG( "byte_count: %d\n", byte_count ); | |
899 | ||
900 | temp_buflist = drm_realloc( dma->buflist, | |
901 | dma->buf_count * sizeof(*dma->buflist), | |
902 | (dma->buf_count + entry->buf_count) | |
903 | * sizeof(*dma->buflist), | |
904 | DRM_MEM_BUFS ); | |
905 | if(!temp_buflist) { | |
906 | /* Free the entry because it isn't valid */ | |
907 | drm_cleanup_buf_error(dev,entry); | |
908 | up( &dev->struct_sem ); | |
909 | atomic_dec( &dev->buf_alloc ); | |
910 | return -ENOMEM; | |
911 | } | |
912 | dma->buflist = temp_buflist; | |
913 | ||
914 | for ( i = 0 ; i < entry->buf_count ; i++ ) { | |
915 | dma->buflist[i + dma->buf_count] = &entry->buflist[i]; | |
916 | } | |
917 | ||
918 | dma->buf_count += entry->buf_count; | |
919 | dma->byte_count += byte_count; | |
920 | ||
921 | DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count ); | |
922 | DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count ); | |
923 | ||
924 | up( &dev->struct_sem ); | |
925 | ||
926 | request.count = entry->buf_count; | |
927 | request.size = size; | |
928 | ||
929 | if ( copy_to_user( argp, &request, sizeof(request) ) ) | |
930 | return -EFAULT; | |
931 | ||
932 | dma->flags = _DRM_DMA_USE_SG; | |
933 | ||
934 | atomic_dec( &dev->buf_alloc ); | |
935 | return 0; | |
936 | } | |
937 | ||
b84397d6 DA |
938 | int drm_addbufs_fb(struct inode *inode, struct file *filp, |
939 | unsigned int cmd, unsigned long arg) | |
940 | { | |
941 | drm_file_t *priv = filp->private_data; | |
942 | drm_device_t *dev = priv->head->dev; | |
943 | drm_device_dma_t *dma = dev->dma; | |
944 | drm_buf_desc_t request; | |
945 | drm_buf_entry_t *entry; | |
946 | drm_buf_t *buf; | |
947 | unsigned long offset; | |
948 | unsigned long agp_offset; | |
949 | int count; | |
950 | int order; | |
951 | int size; | |
952 | int alignment; | |
953 | int page_order; | |
954 | int total; | |
955 | int byte_count; | |
956 | int i; | |
957 | drm_buf_t **temp_buflist; | |
958 | drm_buf_desc_t __user *argp = (void __user *)arg; | |
959 | ||
960 | if (!drm_core_check_feature(dev, DRIVER_FB_DMA)) | |
961 | return -EINVAL; | |
962 | ||
963 | if (!dma) | |
964 | return -EINVAL; | |
965 | ||
966 | if (copy_from_user(&request, argp, sizeof(request))) | |
967 | return -EFAULT; | |
968 | ||
969 | count = request.count; | |
970 | order = drm_order(request.size); | |
971 | size = 1 << order; | |
972 | ||
973 | alignment = (request.flags & _DRM_PAGE_ALIGN) | |
974 | ? PAGE_ALIGN(size) : size; | |
975 | page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; | |
976 | total = PAGE_SIZE << page_order; | |
977 | ||
978 | byte_count = 0; | |
979 | agp_offset = request.agp_start; | |
980 | ||
981 | DRM_DEBUG("count: %d\n", count); | |
982 | DRM_DEBUG("order: %d\n", order); | |
983 | DRM_DEBUG("size: %d\n", size); | |
984 | DRM_DEBUG("agp_offset: %lu\n", agp_offset); | |
985 | DRM_DEBUG("alignment: %d\n", alignment); | |
986 | DRM_DEBUG("page_order: %d\n", page_order); | |
987 | DRM_DEBUG("total: %d\n", total); | |
988 | ||
989 | if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) | |
990 | return -EINVAL; | |
991 | if (dev->queue_count) | |
992 | return -EBUSY; /* Not while in use */ | |
993 | ||
994 | spin_lock(&dev->count_lock); | |
995 | if (dev->buf_use) { | |
996 | spin_unlock(&dev->count_lock); | |
997 | return -EBUSY; | |
998 | } | |
999 | atomic_inc(&dev->buf_alloc); | |
1000 | spin_unlock(&dev->count_lock); | |
1001 | ||
1002 | down(&dev->struct_sem); | |
1003 | entry = &dma->bufs[order]; | |
1004 | if (entry->buf_count) { | |
1005 | up(&dev->struct_sem); | |
1006 | atomic_dec(&dev->buf_alloc); | |
1007 | return -ENOMEM; /* May only call once for each order */ | |
1008 | } | |
1009 | ||
1010 | if (count < 0 || count > 4096) { | |
1011 | up(&dev->struct_sem); | |
1012 | atomic_dec(&dev->buf_alloc); | |
1013 | return -EINVAL; | |
1014 | } | |
1015 | ||
1016 | entry->buflist = drm_alloc(count * sizeof(*entry->buflist), | |
1017 | DRM_MEM_BUFS); | |
1018 | if (!entry->buflist) { | |
1019 | up(&dev->struct_sem); | |
1020 | atomic_dec(&dev->buf_alloc); | |
1021 | return -ENOMEM; | |
1022 | } | |
1023 | memset(entry->buflist, 0, count * sizeof(*entry->buflist)); | |
1024 | ||
1025 | entry->buf_size = size; | |
1026 | entry->page_order = page_order; | |
1027 | ||
1028 | offset = 0; | |
1029 | ||
1030 | while (entry->buf_count < count) { | |
1031 | buf = &entry->buflist[entry->buf_count]; | |
1032 | buf->idx = dma->buf_count + entry->buf_count; | |
1033 | buf->total = alignment; | |
1034 | buf->order = order; | |
1035 | buf->used = 0; | |
1036 | ||
1037 | buf->offset = (dma->byte_count + offset); | |
1038 | buf->bus_address = agp_offset + offset; | |
1039 | buf->address = (void *)(agp_offset + offset); | |
1040 | buf->next = NULL; | |
1041 | buf->waiting = 0; | |
1042 | buf->pending = 0; | |
1043 | init_waitqueue_head(&buf->dma_wait); | |
1044 | buf->filp = NULL; | |
1045 | ||
1046 | buf->dev_priv_size = dev->driver->dev_priv_size; | |
1047 | buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); | |
1048 | if (!buf->dev_private) { | |
1049 | /* Set count correctly so we free the proper amount. */ | |
1050 | entry->buf_count = count; | |
1051 | drm_cleanup_buf_error(dev, entry); | |
1052 | up(&dev->struct_sem); | |
1053 | atomic_dec(&dev->buf_alloc); | |
1054 | return -ENOMEM; | |
1055 | } | |
1056 | memset(buf->dev_private, 0, buf->dev_priv_size); | |
1057 | ||
1058 | DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); | |
1059 | ||
1060 | offset += alignment; | |
1061 | entry->buf_count++; | |
1062 | byte_count += PAGE_SIZE << page_order; | |
1063 | } | |
1064 | ||
1065 | DRM_DEBUG("byte_count: %d\n", byte_count); | |
1066 | ||
1067 | temp_buflist = drm_realloc(dma->buflist, | |
1068 | dma->buf_count * sizeof(*dma->buflist), | |
1069 | (dma->buf_count + entry->buf_count) | |
1070 | * sizeof(*dma->buflist), DRM_MEM_BUFS); | |
1071 | if (!temp_buflist) { | |
1072 | /* Free the entry because it isn't valid */ | |
1073 | drm_cleanup_buf_error(dev, entry); | |
1074 | up(&dev->struct_sem); | |
1075 | atomic_dec(&dev->buf_alloc); | |
1076 | return -ENOMEM; | |
1077 | } | |
1078 | dma->buflist = temp_buflist; | |
1079 | ||
1080 | for (i = 0; i < entry->buf_count; i++) { | |
1081 | dma->buflist[i + dma->buf_count] = &entry->buflist[i]; | |
1082 | } | |
1083 | ||
1084 | dma->buf_count += entry->buf_count; | |
1085 | dma->byte_count += byte_count; | |
1086 | ||
1087 | DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); | |
1088 | DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); | |
1089 | ||
1090 | up(&dev->struct_sem); | |
1091 | ||
1092 | request.count = entry->buf_count; | |
1093 | request.size = size; | |
1094 | ||
1095 | if (copy_to_user(argp, &request, sizeof(request))) | |
1096 | return -EFAULT; | |
1097 | ||
1098 | dma->flags = _DRM_DMA_USE_FB; | |
1099 | ||
1100 | atomic_dec(&dev->buf_alloc); | |
1101 | return 0; | |
1102 | } | |
1103 | ||
1da177e4 LT |
1104 | /** |
1105 | * Add buffers for DMA transfers (ioctl). | |
1106 | * | |
1107 | * \param inode device inode. | |
1108 | * \param filp file pointer. | |
1109 | * \param cmd command. | |
1110 | * \param arg pointer to a drm_buf_desc_t request. | |
1111 | * \return zero on success or a negative number on failure. | |
1112 | * | |
1113 | * According to the memory type specified in drm_buf_desc::flags and the |
1114 | * build options, it dispatches the call to drm_addbufs_agp(), |
1115 | * drm_addbufs_sg(), drm_addbufs_fb() or drm_addbufs_pci() for AGP, |
1116 | * scatter-gather, framebuffer or consistent PCI memory respectively. |
1117 | */ | |
1118 | int drm_addbufs( struct inode *inode, struct file *filp, | |
1119 | unsigned int cmd, unsigned long arg ) | |
1120 | { | |
1121 | drm_buf_desc_t request; | |
1122 | drm_file_t *priv = filp->private_data; | |
1123 | drm_device_t *dev = priv->head->dev; | |
1124 | ||
1125 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | |
1126 | return -EINVAL; | |
1127 | ||
1128 | if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg, | |
1129 | sizeof(request) ) ) | |
1130 | return -EFAULT; | |
1131 | ||
1132 | #if __OS_HAS_AGP | |
1133 | if ( request.flags & _DRM_AGP_BUFFER ) | |
1134 | return drm_addbufs_agp( inode, filp, cmd, arg ); | |
1135 | else | |
1136 | #endif | |
1137 | if ( request.flags & _DRM_SG_BUFFER ) | |
1138 | return drm_addbufs_sg( inode, filp, cmd, arg ); | |
b84397d6 DA |
1139 | else if ( request.flags & _DRM_FB_BUFFER) |
1140 | return drm_addbufs_fb( inode, filp, cmd, arg ); | |
1da177e4 LT |
1141 | else |
1142 | return drm_addbufs_pci( inode, filp, cmd, arg ); | |
1143 | } | |
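A hedged userspace sketch of the request this dispatcher services, assuming the drm_buf_desc_t layout and DRM_IOCTL_ADD_BUFS from the shared drm.h header; the buffer count, buffer size and helper name are illustrative placeholders.

#include <errno.h>
#include <sys/ioctl.h>
#include "drm.h"	/* shared DRM header; install path varies */

/* Request a pool of page-aligned AGP DMA buffers.  The kernel rounds the
 * size up to a power of two (see drm_order()) and writes the final count
 * and size back into the descriptor. */
static int add_agp_buffers(int fd, unsigned long agp_start)
{
	drm_buf_desc_t desc = {
		.count     = 32,		/* buffers requested */
		.size      = 64 * 1024,		/* bytes per buffer */
		.flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
		.agp_start = agp_start,		/* offset into the AGP aperture */
	};

	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) < 0)
		return -errno;
	return desc.count;
}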
1144 | ||
1145 | ||
1146 | /** | |
1147 | * Get information about the buffer mappings. | |
1148 | * | |
1149 | * This was originally meant for debugging purposes, or for use by a sophisticated |
1150 | * client library to determine how best to use the available buffers (e.g., | |
1151 | * large buffers can be used for image transfer). | |
1152 | * | |
1153 | * \param inode device inode. | |
1154 | * \param filp file pointer. | |
1155 | * \param cmd command. | |
1156 | * \param arg pointer to a drm_buf_info structure. | |
1157 | * \return zero on success or a negative number on failure. | |
1158 | * | |
1159 | * Increments drm_device::buf_use while holding the drm_device::count_lock | |
1160 | * lock, preventing allocation of more buffers after this call. Information |
1161 | * about each requested buffer is then copied into user space. | |
1162 | */ | |
1163 | int drm_infobufs( struct inode *inode, struct file *filp, | |
1164 | unsigned int cmd, unsigned long arg ) | |
1165 | { | |
1166 | drm_file_t *priv = filp->private_data; | |
1167 | drm_device_t *dev = priv->head->dev; | |
1168 | drm_device_dma_t *dma = dev->dma; | |
1169 | drm_buf_info_t request; | |
1170 | drm_buf_info_t __user *argp = (void __user *)arg; | |
1171 | int i; | |
1172 | int count; | |
1173 | ||
1174 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | |
1175 | return -EINVAL; | |
1176 | ||
1177 | if ( !dma ) return -EINVAL; | |
1178 | ||
1179 | spin_lock( &dev->count_lock ); | |
1180 | if ( atomic_read( &dev->buf_alloc ) ) { | |
1181 | spin_unlock( &dev->count_lock ); | |
1182 | return -EBUSY; | |
1183 | } | |
1184 | ++dev->buf_use; /* Can't allocate more after this call */ | |
1185 | spin_unlock( &dev->count_lock ); | |
1186 | ||
1187 | if ( copy_from_user( &request, argp, sizeof(request) ) ) | |
1188 | return -EFAULT; | |
1189 | ||
1190 | for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) { | |
1191 | if ( dma->bufs[i].buf_count ) ++count; | |
1192 | } | |
1193 | ||
1194 | DRM_DEBUG( "count = %d\n", count ); | |
1195 | ||
1196 | if ( request.count >= count ) { | |
1197 | for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) { | |
1198 | if ( dma->bufs[i].buf_count ) { | |
1199 | drm_buf_desc_t __user *to = &request.list[count]; | |
1200 | drm_buf_entry_t *from = &dma->bufs[i]; | |
1201 | drm_freelist_t *list = &dma->bufs[i].freelist; | |
1202 | if ( copy_to_user( &to->count, | |
1203 | &from->buf_count, | |
1204 | sizeof(from->buf_count) ) || | |
1205 | copy_to_user( &to->size, | |
1206 | &from->buf_size, | |
1207 | sizeof(from->buf_size) ) || | |
1208 | copy_to_user( &to->low_mark, | |
1209 | &list->low_mark, | |
1210 | sizeof(list->low_mark) ) || | |
1211 | copy_to_user( &to->high_mark, | |
1212 | &list->high_mark, | |
1213 | sizeof(list->high_mark) ) ) | |
1214 | return -EFAULT; | |
1215 | ||
1216 | DRM_DEBUG( "%d %d %d %d %d\n", | |
1217 | i, | |
1218 | dma->bufs[i].buf_count, | |
1219 | dma->bufs[i].buf_size, | |
1220 | dma->bufs[i].freelist.low_mark, | |
1221 | dma->bufs[i].freelist.high_mark ); | |
1222 | ++count; | |
1223 | } | |
1224 | } | |
1225 | } | |
1226 | request.count = count; | |
1227 | ||
1228 | if ( copy_to_user( argp, &request, sizeof(request) ) ) | |
1229 | return -EFAULT; | |
1230 | ||
1231 | return 0; | |
1232 | } | |
1233 | ||
1234 | /** | |
1235 | * Specifies a low and high water mark for buffer allocation. |
1236 | * | |
1237 | * \param inode device inode. | |
1238 | * \param filp file pointer. | |
1239 | * \param cmd command. | |
1240 | * \param arg a pointer to a drm_buf_desc structure. | |
1241 | * \return zero on success or a negative number on failure. | |
1242 | * | |
1243 | * Verifies that the size order is within the admissible range and updates the |
1244 | * low and high water marks of the respective drm_device_dma::bufs entry. |
1245 | * | |
1246 | * \note This ioctl is deprecated and rarely, if ever, used. |
1247 | */ | |
1248 | int drm_markbufs( struct inode *inode, struct file *filp, | |
1249 | unsigned int cmd, unsigned long arg ) | |
1250 | { | |
1251 | drm_file_t *priv = filp->private_data; | |
1252 | drm_device_t *dev = priv->head->dev; | |
1253 | drm_device_dma_t *dma = dev->dma; | |
1254 | drm_buf_desc_t request; | |
1255 | int order; | |
1256 | drm_buf_entry_t *entry; | |
1257 | ||
1258 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | |
1259 | return -EINVAL; | |
1260 | ||
1261 | if ( !dma ) return -EINVAL; | |
1262 | ||
1263 | if ( copy_from_user( &request, | |
1264 | (drm_buf_desc_t __user *)arg, | |
1265 | sizeof(request) ) ) | |
1266 | return -EFAULT; | |
1267 | ||
1268 | DRM_DEBUG( "%d, %d, %d\n", | |
1269 | request.size, request.low_mark, request.high_mark ); | |
1270 | order = drm_order( request.size ); | |
1271 | if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL; | |
1272 | entry = &dma->bufs[order]; | |
1273 | ||
1274 | if ( request.low_mark < 0 || request.low_mark > entry->buf_count ) | |
1275 | return -EINVAL; | |
1276 | if ( request.high_mark < 0 || request.high_mark > entry->buf_count ) | |
1277 | return -EINVAL; | |
1278 | ||
1279 | entry->freelist.low_mark = request.low_mark; | |
1280 | entry->freelist.high_mark = request.high_mark; | |
1281 | ||
1282 | return 0; | |
1283 | } | |
1284 | ||
1285 | /** | |
1286 | * Unreserve the buffers in the list, previously reserved using drmDMA. |
1287 | * | |
1288 | * \param inode device inode. | |
1289 | * \param filp file pointer. | |
1290 | * \param cmd command. | |
1291 | * \param arg pointer to a drm_buf_free structure. | |
1292 | * \return zero on success or a negative number on failure. | |
1293 | * | |
1294 | * Calls drm_free_buffer() for each used buffer. |
1295 | * This function is primarily used for debugging. | |
1296 | */ | |
1297 | int drm_freebufs( struct inode *inode, struct file *filp, | |
1298 | unsigned int cmd, unsigned long arg ) | |
1299 | { | |
1300 | drm_file_t *priv = filp->private_data; | |
1301 | drm_device_t *dev = priv->head->dev; | |
1302 | drm_device_dma_t *dma = dev->dma; | |
1303 | drm_buf_free_t request; | |
1304 | int i; | |
1305 | int idx; | |
1306 | drm_buf_t *buf; | |
1307 | ||
1308 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | |
1309 | return -EINVAL; | |
1310 | ||
1311 | if ( !dma ) return -EINVAL; | |
1312 | ||
1313 | if ( copy_from_user( &request, | |
1314 | (drm_buf_free_t __user *)arg, | |
1315 | sizeof(request) ) ) | |
1316 | return -EFAULT; | |
1317 | ||
1318 | DRM_DEBUG( "%d\n", request.count ); | |
1319 | for ( i = 0 ; i < request.count ; i++ ) { | |
1320 | if ( copy_from_user( &idx, | |
1321 | &request.list[i], | |
1322 | sizeof(idx) ) ) | |
1323 | return -EFAULT; | |
1324 | if ( idx < 0 || idx >= dma->buf_count ) { | |
1325 | DRM_ERROR( "Index %d (of %d max)\n", | |
1326 | idx, dma->buf_count - 1 ); | |
1327 | return -EINVAL; | |
1328 | } | |
1329 | buf = dma->buflist[idx]; | |
1330 | if ( buf->filp != filp ) { | |
1331 | DRM_ERROR( "Process %d freeing buffer not owned\n", | |
1332 | current->pid ); | |
1333 | return -EINVAL; | |
1334 | } | |
1335 | drm_free_buffer( dev, buf ); | |
1336 | } | |
1337 | ||
1338 | return 0; | |
1339 | } | |
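A small companion sketch for returning reserved buffers, with the same headers and hedging as the sketches above: drm_buf_free_t and DRM_IOCTL_FREE_BUFS are assumed from the shared drm.h header, and free_reserved_buffers is a hypothetical helper name.

/* Hand buffers back to the kernel; indices[] holds drm_buf_t::idx values
 * for buffers this file descriptor previously reserved via the DMA ioctl. */
static int free_reserved_buffers(int fd, int *indices, int count)
{
	drm_buf_free_t req = { .count = count, .list = indices };

	if (ioctl(fd, DRM_IOCTL_FREE_BUFS, &req) < 0)
		return -errno;
	return 0;
}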
1340 | ||
1341 | /** | |
1342 | * Maps all of the DMA buffers into client-virtual space (ioctl). | |
1343 | * | |
1344 | * \param inode device inode. | |
1345 | * \param filp file pointer. | |
1346 | * \param cmd command. | |
1347 | * \param arg pointer to a drm_buf_map structure. | |
1348 | * \return zero on success or a negative number on failure. | |
1349 | * | |
1350 | * Maps the AGP, SG or FB buffer region with do_mmap(), and copies information |
1351 | * about each buffer into user space. The PCI buffers are already mapped by the |
1352 | * drm_addbufs_pci() call. |
1353 | */ | |
1354 | int drm_mapbufs( struct inode *inode, struct file *filp, | |
1355 | unsigned int cmd, unsigned long arg ) | |
1356 | { | |
1357 | drm_file_t *priv = filp->private_data; | |
1358 | drm_device_t *dev = priv->head->dev; | |
1359 | drm_device_dma_t *dma = dev->dma; | |
1360 | drm_buf_map_t __user *argp = (void __user *)arg; | |
1361 | int retcode = 0; | |
1362 | const int zero = 0; | |
1363 | unsigned long virtual; | |
1364 | unsigned long address; | |
1365 | drm_buf_map_t request; | |
1366 | int i; | |
1367 | ||
1368 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | |
1369 | return -EINVAL; | |
1370 | ||
1371 | if ( !dma ) return -EINVAL; | |
1372 | ||
1373 | spin_lock( &dev->count_lock ); | |
1374 | if ( atomic_read( &dev->buf_alloc ) ) { | |
1375 | spin_unlock( &dev->count_lock ); | |
1376 | return -EBUSY; | |
1377 | } | |
1378 | dev->buf_use++; /* Can't allocate more after this call */ | |
1379 | spin_unlock( &dev->count_lock ); | |
1380 | ||
1381 | if ( copy_from_user( &request, argp, sizeof(request) ) ) | |
1382 | return -EFAULT; | |
1383 | ||
1384 | if ( request.count >= dma->buf_count ) { | |
b84397d6 DA |
1385 | if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) |
1386 | || (drm_core_check_feature(dev, DRIVER_SG) | |
1387 | && (dma->flags & _DRM_DMA_USE_SG)) | |
1388 | || (drm_core_check_feature(dev, DRIVER_FB_DMA) | |
1389 | && (dma->flags & _DRM_DMA_USE_FB))) { | |
1da177e4 LT |
1390 | drm_map_t *map = dev->agp_buffer_map; |
1391 | ||
1392 | if ( !map ) { | |
1393 | retcode = -EINVAL; | |
1394 | goto done; | |
1395 | } | |
1396 | ||
1397 | #if LINUX_VERSION_CODE <= 0x020402 | |
1398 | down( ¤t->mm->mmap_sem ); | |
1399 | #else | |
1400 | down_write( ¤t->mm->mmap_sem ); | |
1401 | #endif | |
1402 | virtual = do_mmap( filp, 0, map->size, | |
1403 | PROT_READ | PROT_WRITE, | |
1404 | MAP_SHARED, | |
1405 | (unsigned long)map->offset ); | |
1406 | #if LINUX_VERSION_CODE <= 0x020402 | |
1407 | up( ¤t->mm->mmap_sem ); | |
1408 | #else | |
1409 | up_write( ¤t->mm->mmap_sem ); | |
1410 | #endif | |
1411 | } else { | |
1412 | #if LINUX_VERSION_CODE <= 0x020402 | |
1413 | down( ¤t->mm->mmap_sem ); | |
1414 | #else | |
1415 | down_write( ¤t->mm->mmap_sem ); | |
1416 | #endif | |
1417 | virtual = do_mmap( filp, 0, dma->byte_count, | |
1418 | PROT_READ | PROT_WRITE, | |
1419 | MAP_SHARED, 0 ); | |
1420 | #if LINUX_VERSION_CODE <= 0x020402 | |
1421 | up( ¤t->mm->mmap_sem ); | |
1422 | #else | |
1423 | up_write( ¤t->mm->mmap_sem ); | |
1424 | #endif | |
1425 | } | |
1426 | if ( virtual > -1024UL ) { | |
1427 | /* Real error */ | |
1428 | retcode = (signed long)virtual; | |
1429 | goto done; | |
1430 | } | |
1431 | request.virtual = (void __user *)virtual; | |
1432 | ||
1433 | for ( i = 0 ; i < dma->buf_count ; i++ ) { | |
1434 | if ( copy_to_user( &request.list[i].idx, | |
1435 | &dma->buflist[i]->idx, | |
1436 | sizeof(request.list[0].idx) ) ) { | |
1437 | retcode = -EFAULT; | |
1438 | goto done; | |
1439 | } | |
1440 | if ( copy_to_user( &request.list[i].total, | |
1441 | &dma->buflist[i]->total, | |
1442 | sizeof(request.list[0].total) ) ) { | |
1443 | retcode = -EFAULT; | |
1444 | goto done; | |
1445 | } | |
1446 | if ( copy_to_user( &request.list[i].used, | |
1447 | &zero, | |
1448 | sizeof(zero) ) ) { | |
1449 | retcode = -EFAULT; | |
1450 | goto done; | |
1451 | } | |
1452 | address = virtual + dma->buflist[i]->offset; /* *** */ | |
1453 | if ( copy_to_user( &request.list[i].address, | |
1454 | &address, | |
1455 | sizeof(address) ) ) { | |
1456 | retcode = -EFAULT; | |
1457 | goto done; | |
1458 | } | |
1459 | } | |
1460 | } | |
1461 | done: | |
1462 | request.count = dma->buf_count; | |
1463 | DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode ); | |
1464 | ||
1465 | if ( copy_to_user( argp, &request, sizeof(request) ) ) | |
1466 | return -EFAULT; | |
1467 | ||
1468 | return retcode; | |
1469 | } | |
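Finally, a hedged sketch of the two-pass pattern a client library typically uses against this handler: a first call with count == 0 merely reports how many buffers exist, and a second call with a large enough list lets the kernel perform the mmap() and fill in the per-buffer slots. drm_buf_map_t and drm_buf_pub_t are assumed from the shared drm.h header; map_all_buffers is a hypothetical helper name.

#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include "drm.h"	/* shared DRM header; install path varies */

static int map_all_buffers(int fd, drm_buf_pub_t **out_list)
{
	drm_buf_map_t bufs = { .count = 0, .list = NULL };

	/* Pass 1: with count == 0 the kernel skips mapping and reports the total. */
	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs) < 0)
		return -errno;
	if (!bufs.count)
		return 0;	/* no DMA buffers have been added yet */

	bufs.list = calloc(bufs.count, sizeof(*bufs.list));
	if (!bufs.list)
		return -ENOMEM;

	/* Pass 2: count >= buf_count, so the kernel mmap()s and fills list[]. */
	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs) < 0) {
		free(bufs.list);
		return -errno;
	}

	*out_list = bufs.list;	/* list[i].address is buffer i in this process */
	return bufs.count;
}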
1470 |