/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/reservation.h>

#include <drm/drmP.h>

#include "i915_drv.h"

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

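/*
 * Exporter side: hand an importer a scatterlist for the object's backing
 * storage. The object's pages are pinned, the sg table is duplicated so the
 * importer owns an independent copy, and that copy is DMA-mapped for the
 * attaching device.
 */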
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

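/* Undo i915_gem_map_dma_buf: unmap and free the sg copy, then unpin the pages. */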
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	mutex_lock(&obj->base.dev->struct_mutex);
	i915_gem_object_unpin_pages(obj);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

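/*
 * vmap gives the importer a contiguous, write-back cached kernel mapping of
 * the whole object via i915_gem_object_pin_map(); vunmap drops that pin.
 */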
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	void *addr;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	mutex_unlock(&dev->struct_mutex);

	return addr;
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_object_unpin_map(obj);
	mutex_unlock(&dev->struct_mutex);
}

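/* Per-page kmap access is not supported; the stubs below simply return NULL. */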
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

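/*
 * mmap of the dma-buf is forwarded to the object's shmem file so the caller
 * maps the same pages as a regular GEM mmap; the vma then holds a reference
 * to that file rather than to the dma-buf file.
 */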
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

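/*
 * CPU access bracketing: move the object into the CPU domain (flushing as
 * required) before the importer touches the pages, and back to the GTT
 * domain once it is done.
 */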
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

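/*
 * On export, copy the object's outstanding requests into the dma-buf's
 * reservation object: the last reads become shared fences and the last
 * write becomes the exclusive fence, so importers can wait for the GPU.
 */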
static void export_fences(struct drm_i915_gem_object *obj,
			  struct dma_buf *dma_buf)
{
	struct reservation_object *resv = dma_buf->resv;
	struct drm_i915_gem_request *req;
	unsigned long active;
	int idx;

	active = __I915_BO_ACTIVE(obj);
	if (!active)
		return;

	/* Serialise with execbuf to prevent concurrent fence-loops */
	mutex_lock(&obj->base.dev->struct_mutex);

	/* Mark the object for future fences before racily adding old fences */
	obj->base.dma_buf = dma_buf;

	ww_mutex_lock(&resv->lock, NULL);

	for_each_active(active, idx) {
		req = i915_gem_active_get(&obj->last_read[idx],
					  &obj->base.dev->struct_mutex);
		if (!req)
			continue;

		if (reservation_object_reserve_shared(resv) == 0)
			reservation_object_add_shared_fence(resv, &req->fence);

		i915_gem_request_put(req);
	}

	req = i915_gem_active_get(&obj->last_write,
				  &obj->base.dev->struct_mutex);
	if (req) {
		reservation_object_add_excl_fence(resv, &req->fence);
		i915_gem_request_put(req);
	}

	ww_mutex_unlock(&resv->lock);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

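/*
 * Export a GEM object as a dma-buf file descriptor. Illustrative userspace
 * flow (a sketch, not part of this file; drm_fd and gem_handle are assumed
 * to be an open DRM device fd and an existing GEM handle):
 *
 *	struct drm_prime_handle args = {
 *		.handle = gem_handle,
 *		.flags = DRM_CLOEXEC,
 *		.fd = -1,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	// on success, args.fd is a dma-buf fd backed by this exporter
 */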
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dma_buf;

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	dma_buf = dma_buf_export(&exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	export_fences(obj, dma_buf);
	return dma_buf;
}

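/*
 * Backing-storage hooks for imported (foreign) dma-bufs: the pages come from
 * mapping the import attachment rather than from shmem, and are released by
 * unmapping that attachment.
 */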
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

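/*
 * Import a dma-buf as a GEM object. Re-importing one of our own exports just
 * takes a reference on the underlying object; a foreign buffer is attached to
 * and wrapped in a new GEM object whose pages are supplied by the attachment.
 */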
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
	obj->base.write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}