drm/i915: Add struct_mutex locking for debugfs/i915_gem_framebuffer
[deliverable/linux.git] drivers/gpu/drm/i915/i915_gem_dmabuf.c
/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

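/* Recover the GEM object that was stashed in the dma-buf's priv at export. */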
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

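/*
 * Pin the object's backing storage and hand the attaching device an
 * independent copy of the object's scatterlist, DMA-mapped for that device.
 * Everything runs under struct_mutex; the pages stay pinned until the
 * attachment is unmapped again.
 */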
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

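/*
 * Undo i915_gem_map_dma_buf(): unmap and free the importer's copy of the
 * scatterlist, then drop the pin on the backing pages.
 */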
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	mutex_lock(&obj->base.dev->struct_mutex);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);

	mutex_unlock(&obj->base.dev->struct_mutex);
}

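/*
 * Return a kernel virtual mapping of the object's pages. The mapping is
 * created on first use and shared by subsequent callers via vmapping_count.
 */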
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err;

	i915_gem_object_pin_pages(obj);

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto err_unpin;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto err_unpin;

	obj->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}

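/*
 * Drop one reference on the vmap; the last vunmap tears the mapping down
 * and unpins the backing pages.
 */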
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}

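/* Per-page kmap access is not implemented; importers must vmap or mmap. */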
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

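/*
 * Forward mmap to the shmem file backing the object, then swap the vma's
 * file over to that shmem file so the CPU mapping behaves like a regular
 * GEM mmap.
 */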
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

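/*
 * Prepare for CPU access by moving the object into the CPU domain,
 * flushing it out of any GPU-side caches first.
 */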
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

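/*
 * Finish CPU access by moving the object back to the GTT domain. This is
 * done non-interruptibly since the callback cannot return an error, so a
 * failure can only be logged.
 */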
static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool was_interruptible;
	int ret;

	mutex_lock(&dev->struct_mutex);
	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);

	dev_priv->mm.interruptible = was_interruptible;
	mutex_unlock(&dev->struct_mutex);

	if (unlikely(ret))
		DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n");
}

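/* dma-buf operations used for every buffer exported by i915. */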
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

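/*
 * Export a GEM object as a dma-buf, giving the object type a chance to
 * refuse or prepare for export via its dmabuf_export hook.
 */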
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(&exp_info);
}

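/*
 * For imported objects the backing storage belongs to the exporter:
 * get_pages simply maps the attachment and adopts the resulting sg_table.
 */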
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	return 0;
}

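/* Hand the exporter's scatterlist back when the pages are dropped. */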
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

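/*
 * Import a dma-buf as a GEM object. Self-imports are short-circuited: a
 * dma-buf that wraps one of our own objects on this device just gains a
 * GEM reference rather than a new attachment.
 */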
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}