Commit | Line | Data |
---|---|---|
ebfdd6d5 HS |
1 | /* |
2 | * udl_dmabuf.c | |
3 | * | |
4 | * Copyright (c) 2014 The Chromium OS Authors | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify it | |
7 | * under the terms of the GNU General Public License as published by the | |
8 | * Free Software Foundation; either version 2 of the License, or (at your | |
9 | * option) any later version. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
18 | */ | |
19 | ||
20 | #include <drm/drmP.h> | |
21 | #include "udl_drv.h" | |
22 | #include <linux/shmem_fs.h> | |
23 | #include <linux/dma-buf.h> | |
24 | ||
/*
 * Per-attachment state for a udl-exported dma-buf.
 *
 * Allocated in udl_attach_dma_buf() and stored in
 * dma_buf_attachment::priv; freed again in udl_detach_dma_buf().
 */
struct udl_drm_dmabuf_attachment {
	struct sg_table sgt;		/* sg table handed out by map_dma_buf */
	enum dma_data_direction dir;	/* direction of current mapping; DMA_NONE if unmapped */
	bool is_mapped;			/* set once sgt has been successfully mapped */
};
30 | ||
31 | static int udl_attach_dma_buf(struct dma_buf *dmabuf, | |
32 | struct device *dev, | |
33 | struct dma_buf_attachment *attach) | |
34 | { | |
35 | struct udl_drm_dmabuf_attachment *udl_attach; | |
36 | ||
37 | DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev), | |
38 | attach->dmabuf->size); | |
39 | ||
40 | udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL); | |
41 | if (!udl_attach) | |
42 | return -ENOMEM; | |
43 | ||
44 | udl_attach->dir = DMA_NONE; | |
45 | attach->priv = udl_attach; | |
46 | ||
47 | return 0; | |
48 | } | |
49 | ||
50 | static void udl_detach_dma_buf(struct dma_buf *dmabuf, | |
51 | struct dma_buf_attachment *attach) | |
52 | { | |
53 | struct udl_drm_dmabuf_attachment *udl_attach = attach->priv; | |
54 | struct sg_table *sgt; | |
55 | ||
56 | if (!udl_attach) | |
57 | return; | |
58 | ||
59 | DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev), | |
60 | attach->dmabuf->size); | |
61 | ||
62 | sgt = &udl_attach->sgt; | |
63 | ||
64 | if (udl_attach->dir != DMA_NONE) | |
65 | dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, | |
66 | udl_attach->dir); | |
67 | ||
68 | sg_free_table(sgt); | |
69 | kfree(udl_attach); | |
70 | attach->priv = NULL; | |
71 | } | |
72 | ||
73 | static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach, | |
74 | enum dma_data_direction dir) | |
75 | { | |
76 | struct udl_drm_dmabuf_attachment *udl_attach = attach->priv; | |
77 | struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv); | |
78 | struct drm_device *dev = obj->base.dev; | |
79 | struct scatterlist *rd, *wr; | |
80 | struct sg_table *sgt = NULL; | |
81 | unsigned int i; | |
82 | int page_count; | |
83 | int nents, ret; | |
84 | ||
85 | DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev), | |
86 | attach->dmabuf->size, dir); | |
87 | ||
88 | /* just return current sgt if already requested. */ | |
89 | if (udl_attach->dir == dir && udl_attach->is_mapped) | |
90 | return &udl_attach->sgt; | |
91 | ||
92 | if (!obj->pages) { | |
4bc158e0 HS |
93 | ret = udl_gem_get_pages(obj); |
94 | if (ret) { | |
95 | DRM_ERROR("failed to map pages.\n"); | |
96 | return ERR_PTR(ret); | |
97 | } | |
ebfdd6d5 HS |
98 | } |
99 | ||
100 | page_count = obj->base.size / PAGE_SIZE; | |
101 | obj->sg = drm_prime_pages_to_sg(obj->pages, page_count); | |
102 | if (!obj->sg) { | |
103 | DRM_ERROR("sg is null.\n"); | |
104 | return ERR_PTR(-ENOMEM); | |
105 | } | |
106 | ||
107 | sgt = &udl_attach->sgt; | |
108 | ||
109 | ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL); | |
110 | if (ret) { | |
111 | DRM_ERROR("failed to alloc sgt.\n"); | |
112 | return ERR_PTR(-ENOMEM); | |
113 | } | |
114 | ||
115 | mutex_lock(&dev->struct_mutex); | |
116 | ||
117 | rd = obj->sg->sgl; | |
118 | wr = sgt->sgl; | |
119 | for (i = 0; i < sgt->orig_nents; ++i) { | |
120 | sg_set_page(wr, sg_page(rd), rd->length, rd->offset); | |
121 | rd = sg_next(rd); | |
122 | wr = sg_next(wr); | |
123 | } | |
124 | ||
125 | if (dir != DMA_NONE) { | |
126 | nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); | |
127 | if (!nents) { | |
128 | DRM_ERROR("failed to map sgl with iommu.\n"); | |
129 | sg_free_table(sgt); | |
130 | sgt = ERR_PTR(-EIO); | |
131 | goto err_unlock; | |
132 | } | |
133 | } | |
134 | ||
135 | udl_attach->is_mapped = true; | |
136 | udl_attach->dir = dir; | |
137 | attach->priv = udl_attach; | |
138 | ||
139 | err_unlock: | |
140 | mutex_unlock(&dev->struct_mutex); | |
141 | return sgt; | |
142 | } | |
143 | ||
144 | static void udl_unmap_dma_buf(struct dma_buf_attachment *attach, | |
145 | struct sg_table *sgt, | |
146 | enum dma_data_direction dir) | |
147 | { | |
148 | /* Nothing to do. */ | |
149 | DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev), | |
150 | attach->dmabuf->size, dir); | |
151 | } | |
152 | ||
153 | static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num) | |
154 | { | |
155 | /* TODO */ | |
156 | ||
157 | return NULL; | |
158 | } | |
159 | ||
160 | static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf, | |
161 | unsigned long page_num) | |
162 | { | |
163 | /* TODO */ | |
164 | ||
165 | return NULL; | |
166 | } | |
167 | ||
/* dma_buf_ops.kunmap: counterpart of udl_dmabuf_kmap — not implemented yet. */
static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
			      unsigned long page_num, void *addr)
{
	/* TODO */
}
173 | ||
/*
 * dma_buf_ops.kunmap_atomic: counterpart of udl_dmabuf_kmap_atomic —
 * not implemented yet.
 */
static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
				     unsigned long page_num,
				     void *addr)
{
	/* TODO */
}
180 | ||
181 | static int udl_dmabuf_mmap(struct dma_buf *dma_buf, | |
182 | struct vm_area_struct *vma) | |
183 | { | |
184 | /* TODO */ | |
185 | ||
186 | return -EINVAL; | |
187 | } | |
188 | ||
189 | static struct dma_buf_ops udl_dmabuf_ops = { | |
190 | .attach = udl_attach_dma_buf, | |
191 | .detach = udl_detach_dma_buf, | |
192 | .map_dma_buf = udl_map_dma_buf, | |
193 | .unmap_dma_buf = udl_unmap_dma_buf, | |
194 | .kmap = udl_dmabuf_kmap, | |
195 | .kmap_atomic = udl_dmabuf_kmap_atomic, | |
196 | .kunmap = udl_dmabuf_kunmap, | |
197 | .kunmap_atomic = udl_dmabuf_kunmap_atomic, | |
198 | .mmap = udl_dmabuf_mmap, | |
199 | .release = drm_gem_dmabuf_release, | |
200 | }; | |
201 | ||
202 | struct dma_buf *udl_gem_prime_export(struct drm_device *dev, | |
203 | struct drm_gem_object *obj, int flags) | |
204 | { | |
205 | return dma_buf_export(obj, &udl_dmabuf_ops, obj->size, flags, NULL); | |
206 | } | |
207 | ||
208 | static int udl_prime_create(struct drm_device *dev, | |
209 | size_t size, | |
210 | struct sg_table *sg, | |
211 | struct udl_gem_object **obj_p) | |
212 | { | |
213 | struct udl_gem_object *obj; | |
214 | int npages; | |
215 | ||
216 | npages = size / PAGE_SIZE; | |
217 | ||
218 | *obj_p = NULL; | |
219 | obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE); | |
220 | if (!obj) | |
221 | return -ENOMEM; | |
222 | ||
223 | obj->sg = sg; | |
224 | obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); | |
225 | if (obj->pages == NULL) { | |
226 | DRM_ERROR("obj pages is NULL %d\n", npages); | |
227 | return -ENOMEM; | |
228 | } | |
229 | ||
230 | drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); | |
231 | ||
232 | *obj_p = obj; | |
233 | return 0; | |
234 | } | |
235 | ||
236 | struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev, | |
237 | struct dma_buf *dma_buf) | |
238 | { | |
239 | struct dma_buf_attachment *attach; | |
240 | struct sg_table *sg; | |
241 | struct udl_gem_object *uobj; | |
242 | int ret; | |
243 | ||
244 | /* need to attach */ | |
245 | get_device(dev->dev); | |
246 | attach = dma_buf_attach(dma_buf, dev->dev); | |
247 | if (IS_ERR(attach)) { | |
248 | put_device(dev->dev); | |
249 | return ERR_CAST(attach); | |
250 | } | |
251 | ||
252 | get_dma_buf(dma_buf); | |
253 | ||
254 | sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); | |
255 | if (IS_ERR(sg)) { | |
256 | ret = PTR_ERR(sg); | |
257 | goto fail_detach; | |
258 | } | |
259 | ||
260 | ret = udl_prime_create(dev, dma_buf->size, sg, &uobj); | |
261 | if (ret) | |
262 | goto fail_unmap; | |
263 | ||
264 | uobj->base.import_attach = attach; | |
265 | uobj->flags = UDL_BO_WC; | |
266 | ||
267 | return &uobj->base; | |
268 | ||
269 | fail_unmap: | |
270 | dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); | |
271 | fail_detach: | |
272 | dma_buf_detach(dma_buf, attach); | |
273 | dma_buf_put(dma_buf); | |
274 | put_device(dev->dev); | |
275 | return ERR_PTR(ret); | |
276 | } |