mm: kill vma flag VM_RESERVED and mm->reserved_vm counter
/*
 * drivers/staging/omapdrm/omap_gem_dmabuf.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "omap_drv.h"

#include <linux/dma-buf.h>

static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *sg;
	dma_addr_t paddr;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	/* camera, etc, need physically contiguous.. but we need a
	 * better way to know this..
	 */
	ret = omap_gem_get_paddr(obj, &paddr, true);
	if (ret)
		goto free_sg;

	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret)
		goto put_paddr;

	sg_init_table(sg->sgl, 1);
	sg_dma_len(sg->sgl) = obj->size;
	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
	sg_dma_address(sg->sgl) = paddr;

	/* this should be after _get_paddr() to ensure we have pages attached */
	omap_gem_dma_sync(obj, dir);

	return sg;

put_paddr:
	/* unwind the pin taken by _get_paddr() above */
	omap_gem_put_paddr(obj);
free_sg:
	/* don't leak the sg_table allocation on the error paths */
	kfree(sg);
	return ERR_PTR(ret);
}
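
/*
 * Hedged example, not part of the original file: a minimal sketch of how an
 * importing driver would reach omap_gem_map_dma_buf() above through the
 * generic dma-buf API of this era.  The function name and device pointer are
 * hypothetical; only the dma_buf_*() calls are real.
 */
static int example_import_access(struct dma_buf *buf, struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int ret = 0;

	/* attach the importing device to the buffer.. */
	attach = dma_buf_attach(buf, dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	/* ..and ask for an sg_table; this lands in omap_gem_map_dma_buf(),
	 * which pins the buffer contiguously and returns a one-entry table
	 */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto detach;
	}

	/* ... program device DMA using sg_dma_address(sgt->sgl) ... */

	/* this ends up in omap_gem_unmap_dma_buf(), dropping the pin */
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
detach:
	dma_buf_detach(buf, attach);
	return ret;
}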

static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
		struct sg_table *sg, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	omap_gem_put_paddr(obj);
	sg_free_table(sg);
	kfree(sg);
}

static void omap_gem_dmabuf_release(struct dma_buf *buffer)
{
	struct drm_gem_object *obj = buffer->priv;
	/* release the reference that was taken when the dmabuf was
	 * exported (see omap_gem_prime_export())..
	 */
	drm_gem_object_unreference_unlocked(obj);
}


static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		size_t start, size_t len, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	if (omap_gem_flags(obj) & OMAP_BO_TILED) {
		/* TODO we would need to pin at least part of the buffer to
		 * get de-tiled view.  For now just reject it.
		 */
		return -ENOMEM;
	}
	/* make sure we have the pages: */
	return omap_gem_get_pages(obj, &pages, true);
}

static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
		size_t start, size_t len, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	omap_gem_put_pages(obj);
}
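
/*
 * Hedged example, not part of the original file: the begin/end pair above is
 * driven by dma_buf_begin_cpu_access()/dma_buf_end_cpu_access(), which in
 * this kernel era still took a start/len window.  The function name is
 * hypothetical.
 */
static int example_cpu_read(struct dma_buf *buf, size_t len)
{
	/* pins the backing pages via omap_gem_dmabuf_begin_cpu_access() */
	int ret = dma_buf_begin_cpu_access(buf, 0, len, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... kmap and read the pages here ... */

	/* drops the pin via omap_gem_dmabuf_end_cpu_access() */
	dma_buf_end_cpu_access(buf, 0, len, DMA_FROM_DEVICE);
	return 0;
}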


static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	omap_gem_cpu_sync(obj, page_num);
	return kmap_atomic(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	kunmap_atomic(addr);
}

static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	omap_gem_cpu_sync(obj, page_num);
	return kmap(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	kunmap(pages[page_num]);
}
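
/*
 * Hedged example, not part of the original file: the kmap callbacks above
 * are reached one page at a time through dma_buf_kmap()/dma_buf_kunmap().
 * The helper name is hypothetical.
 */
static u32 example_peek_page(struct dma_buf *buf, unsigned long pgnum)
{
	/* omap_gem_dmabuf_kmap() syncs the page for CPU access before mapping */
	void *vaddr = dma_buf_kmap(buf, pgnum);
	u32 first_word;

	if (!vaddr)
		return 0;

	first_word = *(u32 *)vaddr;
	dma_buf_kunmap(buf, pgnum, vaddr);
	return first_word;
}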

/*
 * TODO maybe we can split up drm_gem_mmap to avoid duplicating
 * some here.. or at least have a drm_dmabuf_mmap helper.
 */
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;

	if (WARN_ON(!obj->filp))
		return -EINVAL;

	/* Check for valid size. */
	if (omap_gem_mmap_size(obj) < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->dev->driver->gem_vm_ops)
		return -EINVAL;

	/* VM_DONTEXPAND | VM_DONTDUMP replace the old VM_RESERVED flag here */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	vma->vm_ops->open(vma);

	return omap_gem_mmap_obj(obj, vma);
}
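
/*
 * Hedged example, not part of the original file: from userspace, the mmap
 * callback above is reached by mmap()ing the dma-buf file descriptor itself.
 * This fragment is userspace C kept in a comment so the file stays
 * kernel-only; "dmabuf_fd" and "size" are assumed to come from whatever
 * exported the buffer:
 *
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, dmabuf_fd, 0);
 *	if (p == MAP_FAILED)
 *		handle_error();
 */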

struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = omap_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.kmap_atomic = omap_gem_dmabuf_kmap_atomic,
	.kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
	.kmap = omap_gem_dmabuf_kmap,
	.kunmap = omap_gem_dmabuf_kunmap,
	.mmap = omap_gem_dmabuf_mmap,
};

struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags)
{
	return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, 0600);
}

struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
		struct dma_buf *buffer)
{
	struct drm_gem_object *obj;

	/* is this one of our own objects? */
	if (buffer->ops == &omap_dmabuf_ops) {
		obj = buffer->priv;
		/* is it from our device? */
		if (obj->dev == dev) {
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	/*
	 * TODO add support for importing buffers from other devices..
	 * for now we don't need this but would be nice to add eventually
	 */
	return ERR_PTR(-EINVAL);
}
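
/*
 * Hedged example, not part of the original file: sketch of how the two PRIME
 * entry points above get wired into the driver's struct drm_driver.  The
 * real hookup lives in omap_drv.c; the fields shown are assumptions based on
 * the DRM PRIME conventions of this era, kept in a comment to avoid
 * redefining the driver here:
 *
 *	static struct drm_driver omap_drm_driver = {
 *		...
 *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export = omap_gem_prime_export,
 *		.gem_prime_import = omap_gem_prime_import,
 *		...
 *	};
 */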