/*
 * udl_dmabuf.c
 *
 * Copyright (c) 2014 The Chromium OS Authors
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

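/*
 * Per-attachment state for a dma-buf exported by udl.  Each importer gets
 * its own copy of the object's scatterlist in @sgt; @dir records the DMA
 * direction it is currently mapped with (DMA_NONE when unmapped) and
 * @is_mapped lets a repeated map request in the same direction reuse the
 * existing mapping.
 */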
struct udl_drm_dmabuf_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
	bool is_mapped;
};

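/*
 * attach callback: set up the per-attachment bookkeeping.  Nothing is
 * pinned or mapped yet; that is deferred to the first map_dma_buf call.
 */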
static int udl_attach_dma_buf(struct dma_buf *dmabuf,
			      struct device *dev,
			      struct dma_buf_attachment *attach)
{
	struct udl_drm_dmabuf_attachment *udl_attach;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
			attach->dmabuf->size);

	udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
	if (!udl_attach)
		return -ENOMEM;

	udl_attach->dir = DMA_NONE;
	attach->priv = udl_attach;

	return 0;
}

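/*
 * detach callback: tear down whatever attach/map built - unmap the
 * scatterlist if it is still mapped, then free the table and the
 * bookkeeping structure.
 */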
static void udl_detach_dma_buf(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attach)
{
	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
	struct sg_table *sgt;

	if (!udl_attach)
		return;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
			attach->dmabuf->size);

	sgt = &udl_attach->sgt;

	if (udl_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
			     udl_attach->dir);

	sg_free_table(sgt);
	kfree(udl_attach);
	attach->priv = NULL;
}

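/*
 * map_dma_buf callback: build a scatterlist for the importing device.
 * The object's page array (which must already be populated) is turned
 * into an sg_table, cloned into the attachment's private table, and
 * DMA-mapped for attach->dev.  A repeated request in the same direction
 * just returns the cached mapping.
 */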
static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
	struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
	struct drm_device *dev = obj->base.dev;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int page_count;
	int nents, ret;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
			attach->dmabuf->size, dir);

	/* just return current sgt if already requested. */
	if (udl_attach->dir == dir && udl_attach->is_mapped)
		return &udl_attach->sgt;

	if (!obj->pages) {
		DRM_ERROR("pages is null.\n");
		return ERR_PTR(-ENOMEM);
	}

	page_count = obj->base.size / PAGE_SIZE;
	obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
	if (IS_ERR(obj->sg)) {
		/* drm_prime_pages_to_sg() returns ERR_PTR, not NULL */
		DRM_ERROR("failed to build sg table.\n");
		return ERR_CAST(obj->sg);
	}

	sgt = &udl_attach->sgt;

	ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(ret);
	}

	mutex_lock(&dev->struct_mutex);

	/* clone the object's scatterlist into the attachment's table */
	rd = obj->sg->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	udl_attach->is_mapped = true;
	udl_attach->dir = dir;
	attach->priv = udl_attach;

err_unlock:
	/* the success path also exits through here to drop the lock */
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}

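/*
 * unmap_dma_buf callback: intentionally a no-op; the mapping stays live
 * until the importer detaches, at which point udl_detach_dma_buf() tears
 * it down.
 */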
static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
			      struct sg_table *sgt,
			      enum dma_data_direction dir)
{
	/* Nothing to do. */
	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
			attach->dmabuf->size, dir);
}

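/*
 * CPU access and userspace mapping of the exported buffer are not
 * implemented yet: the kmap variants return NULL and mmap fails with
 * -EINVAL until these stubs are filled in.
 */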
static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
				    unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
			      unsigned long page_num, void *addr)
{
	/* TODO */
}

static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
				     unsigned long page_num,
				     void *addr)
{
	/* TODO */
}

static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
			   struct vm_area_struct *vma)
{
	/* TODO */

	return -EINVAL;
}

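/*
 * dma_buf_ops for buffers exported by udl.  Release goes through the DRM
 * core helper, which drops the exporting GEM object's reference once the
 * dma-buf itself is freed.
 */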
static const struct dma_buf_ops udl_dmabuf_ops = {
	.attach = udl_attach_dma_buf,
	.detach = udl_detach_dma_buf,
	.map_dma_buf = udl_map_dma_buf,
	.unmap_dma_buf = udl_unmap_dma_buf,
	.kmap = udl_dmabuf_kmap,
	.kmap_atomic = udl_dmabuf_kmap_atomic,
	.kunmap = udl_dmabuf_kunmap,
	.kunmap_atomic = udl_dmabuf_kunmap_atomic,
	.mmap = udl_dmabuf_mmap,
	.release = drm_gem_dmabuf_release,
};

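/*
 * Exporter entry point, wired into the drm_driver's .gem_prime_export
 * hook; size and flags come straight from the GEM object and the
 * userspace request, and no reservation object is supplied.
 */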
struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	return dma_buf_export(obj, &udl_dmabuf_ops, obj->size, flags, NULL);
}

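/*
 * Wrap an imported scatterlist in a fresh udl GEM object: allocate the
 * object, keep a pointer to the importer-provided sg_table, and expand
 * it into a page array for the driver's own use.
 */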
static int udl_prime_create(struct drm_device *dev,
			    size_t size,
			    struct sg_table *sg,
			    struct udl_gem_object **obj_p)
{
	struct udl_gem_object *obj;
	int npages;

	npages = size / PAGE_SIZE;

	*obj_p = NULL;
	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
	if (!obj)
		return -ENOMEM;

	obj->sg = sg;
	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (obj->pages == NULL) {
		DRM_ERROR("obj pages is NULL %d\n", npages);
		/* drop the reference taken by udl_gem_alloc_object() */
		drm_gem_object_unreference_unlocked(&obj->base);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

	*obj_p = obj;
	return 0;
}

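/*
 * Importer entry point, wired into the drm_driver's .gem_prime_import
 * hook: attach to the foreign dma-buf, map it bidirectionally, and wrap
 * the resulting scatterlist in a udl GEM object.  References on the
 * dma-buf and on the underlying device are held for the lifetime of the
 * object and dropped again on every failure path.
 */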
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	/* need to attach */
	get_device(dev->dev);
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach)) {
		put_device(dev->dev);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret)
		goto fail_unmap;

	uobj->base.import_attach = attach;
	uobj->flags = UDL_BO_WC;

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);
	put_device(dev->dev);
	return ERR_PTR(ret);
}
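
/*
 * For reference, a minimal sketch (not part of this file) of how these
 * entry points are expected to be wired into the driver's drm_driver
 * struct in udl_drv.c, alongside the core PRIME ioctl helpers:
 *
 *	static struct drm_driver driver = {
 *		.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
 *		...
 *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export = udl_gem_prime_export,
 *		.gem_prime_import = udl_gem_prime_import,
 *	};
 */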