/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
21 #include <linux/slab.h>
22 #include <linux/mutex.h>
23 #include <linux/export.h>
24 #include <linux/dma-buf.h>
25 #include <linux/dma-mapping.h>
29 #include <drm/drm_gem_cma_helper.h>
30 #include <drm/drm_vma_manager.h>
/**
 * DOC: cma helpers
 *
 * The Contiguous Memory Allocator reserves a pool of memory at early boot
 * that is used to service requests for large blocks of contiguous memory.
 *
 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
 * objects that are physically contiguous in memory. This is useful for
 * display drivers that are unable to map scattered buffers via an IOMMU.
 */
44 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
46 * @size: size of the object to allocate
48 * This function creates and initializes a GEM CMA object of the given size,
49 * but doesn't allocate any memory to back the object.
52 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
53 * error code on failure.
55 static struct drm_gem_cma_object
*
56 __drm_gem_cma_create(struct drm_device
*drm
, size_t size
)
58 struct drm_gem_cma_object
*cma_obj
;
59 struct drm_gem_object
*gem_obj
;
62 cma_obj
= kzalloc(sizeof(*cma_obj
), GFP_KERNEL
);
64 return ERR_PTR(-ENOMEM
);
66 gem_obj
= &cma_obj
->base
;
68 ret
= drm_gem_object_init(drm
, gem_obj
, size
);
72 ret
= drm_gem_create_mmap_offset(gem_obj
);
74 drm_gem_object_release(gem_obj
);
86 * drm_gem_cma_create - allocate an object with the given size
88 * @size: size of the object to allocate
90 * This function creates a CMA GEM object and allocates a contiguous chunk of
91 * memory as backing store. The backing memory has the writecombine attribute
95 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
96 * error code on failure.
98 struct drm_gem_cma_object
*drm_gem_cma_create(struct drm_device
*drm
,
101 struct drm_gem_cma_object
*cma_obj
;
104 size
= round_up(size
, PAGE_SIZE
);
106 cma_obj
= __drm_gem_cma_create(drm
, size
);
110 cma_obj
->vaddr
= dma_alloc_writecombine(drm
->dev
, size
,
111 &cma_obj
->paddr
, GFP_KERNEL
| __GFP_NOWARN
);
112 if (!cma_obj
->vaddr
) {
113 dev_err(drm
->dev
, "failed to allocate buffer with size %d\n",
122 drm_gem_cma_free_object(&cma_obj
->base
);
125 EXPORT_SYMBOL_GPL(drm_gem_cma_create
);
128 * drm_gem_cma_create_with_handle - allocate an object with the given size and
129 * return a GEM handle to it
130 * @file_priv: DRM file-private structure to register the handle for
132 * @size: size of the object to allocate
133 * @handle: return location for the GEM handle
135 * This function creates a CMA GEM object, allocating a physically contiguous
136 * chunk of memory as backing store. The GEM object is then added to the list
137 * of object associated with the given file and a handle to it is returned.
140 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
141 * error code on failure.
143 static struct drm_gem_cma_object
*
144 drm_gem_cma_create_with_handle(struct drm_file
*file_priv
,
145 struct drm_device
*drm
, size_t size
,
148 struct drm_gem_cma_object
*cma_obj
;
149 struct drm_gem_object
*gem_obj
;
152 cma_obj
= drm_gem_cma_create(drm
, size
);
156 gem_obj
= &cma_obj
->base
;
159 * allocate a id of idr table where the obj is registered
160 * and handle has the id what user can see.
162 ret
= drm_gem_handle_create(file_priv
, gem_obj
, handle
);
164 goto err_handle_create
;
166 /* drop reference from allocate - handle holds it now. */
167 drm_gem_object_unreference_unlocked(gem_obj
);
172 drm_gem_cma_free_object(gem_obj
);
178 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
179 * @gem_obj: GEM object to free
181 * This function frees the backing memory of the CMA GEM object, cleans up the
182 * GEM object state and frees the memory used to store the object itself.
183 * Drivers using the CMA helpers should set this as their DRM driver's
184 * ->gem_free_object() callback.
186 void drm_gem_cma_free_object(struct drm_gem_object
*gem_obj
)
188 struct drm_gem_cma_object
*cma_obj
;
190 drm_gem_free_mmap_offset(gem_obj
);
192 cma_obj
= to_drm_gem_cma_obj(gem_obj
);
194 if (cma_obj
->vaddr
) {
195 dma_free_writecombine(gem_obj
->dev
->dev
, cma_obj
->base
.size
,
196 cma_obj
->vaddr
, cma_obj
->paddr
);
197 } else if (gem_obj
->import_attach
) {
198 drm_prime_gem_destroy(gem_obj
, cma_obj
->sgt
);
201 drm_gem_object_release(gem_obj
);
205 EXPORT_SYMBOL_GPL(drm_gem_cma_free_object
);
208 * drm_gem_cma_dumb_create - create a dumb buffer object
209 * @file_priv: DRM file-private structure to create the dumb buffer for
213 * This function computes the pitch of the dumb buffer and rounds it up to an
214 * integer number of bytes per pixel. Drivers for hardware that doesn't have
215 * any additional restrictions on the pitch can directly use this function as
216 * their ->dumb_create() callback.
219 * 0 on success or a negative error code on failure.
221 int drm_gem_cma_dumb_create(struct drm_file
*file_priv
,
222 struct drm_device
*drm
,
223 struct drm_mode_create_dumb
*args
)
225 struct drm_gem_cma_object
*cma_obj
;
226 int min_pitch
= DIV_ROUND_UP(args
->width
* args
->bpp
, 8);
228 if (args
->pitch
< min_pitch
)
229 args
->pitch
= min_pitch
;
231 if (args
->size
< args
->pitch
* args
->height
)
232 args
->size
= args
->pitch
* args
->height
;
234 cma_obj
= drm_gem_cma_create_with_handle(file_priv
, drm
, args
->size
,
236 return PTR_ERR_OR_ZERO(cma_obj
);
238 EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create
);
241 * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
243 * @file_priv: DRM file-private structure containing the GEM object
245 * @handle: GEM object handle
246 * @offset: return location for the fake mmap offset
248 * This function look up an object by its handle and returns the fake mmap
249 * offset associated with it. Drivers using the CMA helpers should set this
250 * as their DRM driver's ->dumb_map_offset() callback.
253 * 0 on success or a negative error code on failure.
255 int drm_gem_cma_dumb_map_offset(struct drm_file
*file_priv
,
256 struct drm_device
*drm
, u32 handle
,
259 struct drm_gem_object
*gem_obj
;
261 mutex_lock(&drm
->struct_mutex
);
263 gem_obj
= drm_gem_object_lookup(drm
, file_priv
, handle
);
265 dev_err(drm
->dev
, "failed to lookup GEM object\n");
266 mutex_unlock(&drm
->struct_mutex
);
270 *offset
= drm_vma_node_offset_addr(&gem_obj
->vma_node
);
272 drm_gem_object_unreference(gem_obj
);
274 mutex_unlock(&drm
->struct_mutex
);
278 EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset
);
280 const struct vm_operations_struct drm_gem_cma_vm_ops
= {
281 .open
= drm_gem_vm_open
,
282 .close
= drm_gem_vm_close
,
284 EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops
);
286 static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object
*cma_obj
,
287 struct vm_area_struct
*vma
)
292 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
293 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
296 vma
->vm_flags
&= ~VM_PFNMAP
;
299 ret
= dma_mmap_writecombine(cma_obj
->base
.dev
->dev
, vma
,
300 cma_obj
->vaddr
, cma_obj
->paddr
,
301 vma
->vm_end
- vma
->vm_start
);
303 drm_gem_vm_close(vma
);
309 * drm_gem_cma_mmap - memory-map a CMA GEM object
311 * @vma: VMA for the area to be mapped
313 * This function implements an augmented version of the GEM DRM file mmap
314 * operation for CMA objects: In addition to the usual GEM VMA setup it
315 * immediately faults in the entire object instead of using on-demaind
316 * faulting. Drivers which employ the CMA helpers should use this function
317 * as their ->mmap() handler in the DRM device file's file_operations
321 * 0 on success or a negative error code on failure.
323 int drm_gem_cma_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
325 struct drm_gem_cma_object
*cma_obj
;
326 struct drm_gem_object
*gem_obj
;
329 ret
= drm_gem_mmap(filp
, vma
);
333 gem_obj
= vma
->vm_private_data
;
334 cma_obj
= to_drm_gem_cma_obj(gem_obj
);
336 return drm_gem_cma_mmap_obj(cma_obj
, vma
);
338 EXPORT_SYMBOL_GPL(drm_gem_cma_mmap
);
#ifdef CONFIG_DEBUG_FS
/**
 * drm_gem_cma_describe - describe a CMA GEM object for debugfs
 * @cma_obj: CMA GEM object
 * @m: debugfs file handle
 *
 * This function can be used to dump a human-readable representation of the
 * CMA GEM object into a synthetic file.
 */
void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
			  struct seq_file *m)
{
	struct drm_gem_object *obj = &cma_obj->base;
	struct drm_device *dev = obj->dev;
	uint64_t off;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	off = drm_vma_node_start(&obj->vma_node);

	/* obj->size is a size_t, so print it with %zu rather than %d */
	seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
			obj->name, obj->refcount.refcount.counter,
			off, &cma_obj->paddr, cma_obj->vaddr, obj->size);

	seq_printf(m, "\n");
}
EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
#endif
370 * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
371 * pages for a CMA GEM object
374 * This function exports a scatter/gather table suitable for PRIME usage by
375 * calling the standard DMA mapping API. Drivers using the CMA helpers should
376 * set this as their DRM driver's ->gem_prime_get_sg_table() callback.
379 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
381 struct sg_table
*drm_gem_cma_prime_get_sg_table(struct drm_gem_object
*obj
)
383 struct drm_gem_cma_object
*cma_obj
= to_drm_gem_cma_obj(obj
);
384 struct sg_table
*sgt
;
387 sgt
= kzalloc(sizeof(*sgt
), GFP_KERNEL
);
391 ret
= dma_get_sgtable(obj
->dev
->dev
, sgt
, cma_obj
->vaddr
,
392 cma_obj
->paddr
, obj
->size
);
402 EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table
);
405 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
406 * driver's scatter/gather table of pinned pages
407 * @dev: device to import into
408 * @attach: DMA-BUF attachment
409 * @sgt: scatter/gather table of pinned pages
411 * This function imports a scatter/gather table exported via DMA-BUF by
412 * another driver. Imported buffers must be physically contiguous in memory
413 * (i.e. the scatter/gather table must contain a single entry). Drivers that
414 * use the CMA helpers should set this as their DRM driver's
415 * ->gem_prime_import_sg_table() callback.
418 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
419 * error code on failure.
421 struct drm_gem_object
*
422 drm_gem_cma_prime_import_sg_table(struct drm_device
*dev
,
423 struct dma_buf_attachment
*attach
,
424 struct sg_table
*sgt
)
426 struct drm_gem_cma_object
*cma_obj
;
429 return ERR_PTR(-EINVAL
);
431 /* Create a CMA GEM buffer. */
432 cma_obj
= __drm_gem_cma_create(dev
, attach
->dmabuf
->size
);
434 return ERR_CAST(cma_obj
);
436 cma_obj
->paddr
= sg_dma_address(sgt
->sgl
);
439 DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj
->paddr
, attach
->dmabuf
->size
);
441 return &cma_obj
->base
;
443 EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table
);
446 * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
448 * @vma: VMA for the area to be mapped
450 * This function maps a buffer imported via DRM PRIME into a userspace
451 * process's address space. Drivers that use the CMA helpers should set this
452 * as their DRM driver's ->gem_prime_mmap() callback.
455 * 0 on success or a negative error code on failure.
457 int drm_gem_cma_prime_mmap(struct drm_gem_object
*obj
,
458 struct vm_area_struct
*vma
)
460 struct drm_gem_cma_object
*cma_obj
;
461 struct drm_device
*dev
= obj
->dev
;
464 mutex_lock(&dev
->struct_mutex
);
465 ret
= drm_gem_mmap_obj(obj
, obj
->size
, vma
);
466 mutex_unlock(&dev
->struct_mutex
);
470 cma_obj
= to_drm_gem_cma_obj(obj
);
471 return drm_gem_cma_mmap_obj(cma_obj
, vma
);
473 EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap
);
476 * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
480 * This function maps a buffer exported via DRM PRIME into the kernel's
481 * virtual address space. Since the CMA buffers are already mapped into the
482 * kernel virtual address space this simply returns the cached virtual
483 * address. Drivers using the CMA helpers should set this as their DRM
484 * driver's ->gem_prime_vmap() callback.
487 * The kernel virtual address of the CMA GEM object's backing store.
489 void *drm_gem_cma_prime_vmap(struct drm_gem_object
*obj
)
491 struct drm_gem_cma_object
*cma_obj
= to_drm_gem_cma_obj(obj
);
493 return cma_obj
->vaddr
;
495 EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap
);
/**
 * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
 *     address space
 * @obj: GEM object
 * @vaddr: kernel virtual address where the CMA GEM object was mapped
 *
 * This function removes a buffer exported via DRM PRIME from the kernel's
 * virtual address space. This is a no-op because CMA buffers cannot be
 * unmapped from kernel space. Drivers using the CMA helpers should set this
 * as their DRM driver's ->gem_prime_vunmap() callback.
 */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);