drivers/gpu/drm/drm_gem_cma_helper.c
/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>

/*
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: The drm device
 * @size: The GEM object size
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns a struct drm_gem_cma_object * on success or an ERR_PTR()-encoded
 * negative error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, unsigned int size)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
        if (!cma_obj)
                return ERR_PTR(-ENOMEM);

        gem_obj = &cma_obj->base;

        ret = drm_gem_object_init(drm, gem_obj, size);
        if (ret)
                goto error;

        ret = drm_gem_create_mmap_offset(gem_obj);
        if (ret) {
                drm_gem_object_release(gem_obj);
                goto error;
        }

        return cma_obj;

error:
        kfree(cma_obj);
        return ERR_PTR(ret);
}

/*
 * drm_gem_cma_create - allocate an object with the given size
 *
 * Returns a struct drm_gem_cma_object * on success or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
                unsigned int size)
{
        struct drm_gem_cma_object *cma_obj;
        int ret;

        size = round_up(size, PAGE_SIZE);

        cma_obj = __drm_gem_cma_create(drm, size);
        if (IS_ERR(cma_obj))
                return cma_obj;

        cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
                        &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
        if (!cma_obj->vaddr) {
                dev_err(drm->dev, "failed to allocate buffer with size %u\n",
                        size);
                ret = -ENOMEM;
                goto error;
        }

        return cma_obj;

error:
        drm_gem_cma_free_object(&cma_obj->base);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
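
/*
 * Usage sketch (illustrative, not part of this helper): a hypothetical KMS
 * driver could back a scanout buffer with a CMA GEM object roughly like
 * this. The foo_* names and the 1920x1080, 32 bpp sizing are assumptions
 * made up for the example.
 *
 *      struct drm_gem_cma_object *cma_obj;
 *      size_t size = 1920 * 1080 * 4;  // pitch * height at 32 bpp
 *
 *      cma_obj = drm_gem_cma_create(foo->drm, size);
 *      if (IS_ERR(cma_obj))
 *              return PTR_ERR(cma_obj);
 *
 *      // cma_obj->paddr is the bus/DMA address to program into scanout
 *      // hardware; cma_obj->vaddr is a kernel virtual mapping.
 *      foo_hw_set_scanout(foo, cma_obj->paddr);
 *
 * When the driver is done it drops its reference with
 * drm_gem_object_unreference_unlocked(&cma_obj->base); the memory is then
 * returned by drm_gem_cma_free_object(), assuming that helper is wired up
 * as the driver's .gem_free_object callback.
 */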

/*
 * drm_gem_cma_create_with_handle - allocate an object with the given size
 * and create a gem handle for it
 *
 * Returns a struct drm_gem_cma_object * on success or an ERR_PTR()-encoded
 * negative error code on failure.
 */
static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
                struct drm_file *file_priv,
                struct drm_device *drm, unsigned int size,
                unsigned int *handle)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        cma_obj = drm_gem_cma_create(drm, size);
        if (IS_ERR(cma_obj))
                return cma_obj;

        gem_obj = &cma_obj->base;

        /*
         * Allocate an ID in the IDR table where the object is registered;
         * the handle returned to userspace is that ID.
         */
        ret = drm_gem_handle_create(file_priv, gem_obj, handle);
        if (ret)
                goto err_handle_create;

        /* Drop the reference from allocation - the handle holds it now. */
        drm_gem_object_unreference_unlocked(gem_obj);

        return cma_obj;

err_handle_create:
        drm_gem_cma_free_object(gem_obj);

        return ERR_PTR(ret);
}

/*
 * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
 * function
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
        struct drm_gem_cma_object *cma_obj;

        drm_gem_free_mmap_offset(gem_obj);

        cma_obj = to_drm_gem_cma_obj(gem_obj);

        if (cma_obj->vaddr) {
                dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
                                      cma_obj->vaddr, cma_obj->paddr);
        } else if (gem_obj->import_attach) {
                drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
        }

        drm_gem_object_release(gem_obj);

        kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);

/*
 * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required; wrap
 * it in your own function if you need bigger alignment.
 */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
                struct drm_device *dev, struct drm_mode_create_dumb *args)
{
        struct drm_gem_cma_object *cma_obj;
        int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

        if (args->pitch < min_pitch)
                args->pitch = min_pitch;

        if (args->size < args->pitch * args->height)
                args->size = args->pitch * args->height;

        cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
                        args->size, &args->handle);
        return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
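
/*
 * Worked example of the sizing above (illustrative numbers): for a
 * DRM_IOCTL_MODE_CREATE_DUMB request with width = 1024, height = 768 and
 * bpp = 32, the minimum pitch is DIV_ROUND_UP(1024 * 32, 8) = 4096 bytes,
 * so args->size becomes at least 4096 * 768 = 3145728 bytes;
 * drm_gem_cma_create() then rounds that up to a multiple of PAGE_SIZE
 * before allocating the backing memory.
 */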

/*
 * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
 * function
 */
int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
                struct drm_device *drm, uint32_t handle, uint64_t *offset)
{
        struct drm_gem_object *gem_obj;

        mutex_lock(&drm->struct_mutex);

        gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
        if (!gem_obj) {
                dev_err(drm->dev, "failed to lookup gem object\n");
                mutex_unlock(&drm->struct_mutex);
                return -EINVAL;
        }

        *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

        drm_gem_object_unreference(gem_obj);

        mutex_unlock(&drm->struct_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);

const struct vm_operations_struct drm_gem_cma_vm_ops = {
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
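
/*
 * Hookup sketch (illustrative): a driver using these helpers typically
 * wires them into its struct drm_driver as below. The foo_driver name is
 * hypothetical; the callback fields match struct drm_driver of this kernel
 * generation.
 *
 *      static struct drm_driver foo_driver = {
 *              .driver_features = DRIVER_GEM | DRIVER_MODESET,
 *              .gem_free_object = drm_gem_cma_free_object,
 *              .gem_vm_ops      = &drm_gem_cma_vm_ops,
 *              .dumb_create     = drm_gem_cma_dumb_create,
 *              .dumb_map_offset = drm_gem_cma_dumb_map_offset,
 *              .dumb_destroy    = drm_gem_dumb_destroy,
 *              // remaining fields (fops, name, ioctls, ...) omitted
 *      };
 */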

static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
                struct vm_area_struct *vma)
{
        int ret;

        ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT,
                        vma->vm_end - vma->vm_start, vma->vm_page_prot);
        if (ret)
                drm_gem_vm_close(vma);

        return ret;
}

/*
 * drm_gem_cma_mmap - (struct file_operations)->mmap callback function
 */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        gem_obj = vma->vm_private_data;
        cma_obj = to_drm_gem_cma_obj(gem_obj);

        return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
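
/*
 * Hookup sketch (illustrative): drm_gem_cma_mmap() is meant to be installed
 * as the mmap handler in the driver's file_operations, next to the standard
 * DRM file callbacks. The foo_fops name is hypothetical.
 *
 *      static const struct file_operations foo_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = drm_open,
 *              .release        = drm_release,
 *              .unlocked_ioctl = drm_ioctl,
 *              .poll           = drm_poll,
 *              .read           = drm_read,
 *              .mmap           = drm_gem_cma_mmap,
 *      };
 */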

#ifdef CONFIG_DEBUG_FS
void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
{
        struct drm_gem_object *obj = &cma_obj->base;
        struct drm_device *dev = obj->dev;
        uint64_t off;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        off = drm_vma_node_start(&obj->vma_node);

        seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
                        obj->name, obj->refcount.refcount.counter,
                        off, &cma_obj->paddr, cma_obj->vaddr, obj->size);

        seq_printf(m, "\n");
}
EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
#endif


/* low-level interface prime helpers */
struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
        struct sg_table *sgt;
        int ret;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
                              cma_obj->paddr, obj->size);
        if (ret < 0)
                goto out;

        return sgt;

out:
        kfree(sgt);
        return NULL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);

struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
                                  struct sg_table *sgt)
{
        struct drm_gem_cma_object *cma_obj;

        if (sgt->nents != 1)
                return ERR_PTR(-EINVAL);

        /* Create a CMA GEM buffer. */
        cma_obj = __drm_gem_cma_create(dev, size);
        if (IS_ERR(cma_obj))
                return ERR_CAST(cma_obj);

        cma_obj->paddr = sg_dma_address(sgt->sgl);
        cma_obj->sgt = sgt;

        DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, size);

        return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);

int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
                           struct vm_area_struct *vma)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_device *dev = obj->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        mutex_unlock(&dev->struct_mutex);
        if (ret < 0)
                return ret;

        cma_obj = to_drm_gem_cma_obj(obj);
        return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);

void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
{
        struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

        return cma_obj->vaddr;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);

void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        /* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
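
/*
 * Hookup sketch (illustrative): the prime helpers above slot into the
 * generic PRIME plumbing of struct drm_driver roughly as below, with
 * DRIVER_PRIME set in driver_features. Field names match struct drm_driver
 * of this kernel generation.
 *
 *      .prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
 *      .prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
 *      .gem_prime_import          = drm_gem_prime_import,
 *      .gem_prime_export          = drm_gem_prime_export,
 *      .gem_prime_get_sg_table    = drm_gem_cma_prime_get_sg_table,
 *      .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
 *      .gem_prime_vmap            = drm_gem_cma_prime_vmap,
 *      .gem_prime_vunmap          = drm_gem_cma_prime_vunmap,
 *      .gem_prime_mmap            = drm_gem_cma_prime_mmap,
 */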