Commit | Line | Data |
---|---|---|
349c9e13 BG |
/*
 * drivers/staging/android/ion/ion_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
17 | ||
18 | #include <linux/device.h> | |
19 | #include <linux/slab.h> | |
20 | #include <linux/errno.h> | |
21 | #include <linux/err.h> | |
22 | #include <linux/dma-mapping.h> | |
23 | ||
24 | #include "ion.h" | |
25 | #include "ion_priv.h" | |
26 | ||
27 | #define ION_CMA_ALLOCATE_FAILED -1 | |
28 | ||
29 | struct ion_cma_heap { | |
30 | struct ion_heap heap; | |
31 | struct device *dev; | |
32 | }; | |
33 | ||
34 | #define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap) | |
35 | ||
36 | struct ion_cma_buffer_info { | |
37 | void *cpu_addr; | |
38 | dma_addr_t handle; | |
39 | struct sg_table *table; | |
40 | }; | |
41 | ||
349c9e13 | 42 | |
349c9e13 BG |
43 | /* ION CMA heap operations functions */ |
44 | static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, | |
45 | unsigned long len, unsigned long align, | |
46 | unsigned long flags) | |
47 | { | |
48 | struct ion_cma_heap *cma_heap = to_cma_heap(heap); | |
49 | struct device *dev = cma_heap->dev; | |
50 | struct ion_cma_buffer_info *info; | |
51 | ||
52 | dev_dbg(dev, "Request buffer allocation len %ld\n", len); | |
53 | ||
661f82f6 CC |
54 | if (buffer->flags & ION_FLAG_CACHED) |
55 | return -EINVAL; | |
56 | ||
57 | if (align > PAGE_SIZE) | |
58 | return -EINVAL; | |
59 | ||
349c9e13 | 60 | info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL); |
f0ca3e87 | 61 | if (!info) |
349c9e13 | 62 | return ION_CMA_ALLOCATE_FAILED; |
349c9e13 | 63 | |
661f82f6 CC |
64 | info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), |
65 | GFP_HIGHUSER | __GFP_ZERO); | |
349c9e13 BG |
66 | |
67 | if (!info->cpu_addr) { | |
68 | dev_err(dev, "Fail to allocate buffer\n"); | |
69 | goto err; | |
70 | } | |
71 | ||
72 | info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); | |
f0ca3e87 | 73 | if (!info->table) |
349c9e13 | 74 | goto free_mem; |
349c9e13 | 75 | |
936d61ee JP |
76 | if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle, |
77 | len)) | |
c13bd1c4 | 78 | goto free_table; |
349c9e13 BG |
79 | /* keep this for memory release */ |
80 | buffer->priv_virt = info; | |
f82ad60e | 81 | buffer->sg_table = info->table; |
349c9e13 BG |
82 | dev_dbg(dev, "Allocate buffer %p\n", buffer); |
83 | return 0; | |
84 | ||
85 | free_table: | |
86 | kfree(info->table); | |
87 | free_mem: | |
88 | dma_free_coherent(dev, len, info->cpu_addr, info->handle); | |
89 | err: | |
90 | kfree(info); | |
91 | return ION_CMA_ALLOCATE_FAILED; | |
92 | } | |
93 | ||
94 | static void ion_cma_free(struct ion_buffer *buffer) | |
95 | { | |
96 | struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); | |
97 | struct device *dev = cma_heap->dev; | |
98 | struct ion_cma_buffer_info *info = buffer->priv_virt; | |
99 | ||
100 | dev_dbg(dev, "Release buffer %p\n", buffer); | |
101 | /* release memory */ | |
102 | dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle); | |
103 | /* release sg table */ | |
104 | sg_free_table(info->table); | |
105 | kfree(info->table); | |
106 | kfree(info); | |
107 | } | |
108 | ||
349c9e13 BG |
109 | static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer, |
110 | struct vm_area_struct *vma) | |
111 | { | |
112 | struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); | |
113 | struct device *dev = cma_heap->dev; | |
114 | struct ion_cma_buffer_info *info = buffer->priv_virt; | |
115 | ||
116 | return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle, | |
117 | buffer->size); | |
118 | } | |
119 | ||
f63958d8 CC |
120 | static void *ion_cma_map_kernel(struct ion_heap *heap, |
121 | struct ion_buffer *buffer) | |
349c9e13 BG |
122 | { |
123 | struct ion_cma_buffer_info *info = buffer->priv_virt; | |
124 | /* kernel memory mapping has been done at allocation time */ | |
125 | return info->cpu_addr; | |
126 | } | |
127 | ||
661f82f6 | 128 | static void ion_cma_unmap_kernel(struct ion_heap *heap, |
679011bd | 129 | struct ion_buffer *buffer) |
661f82f6 CC |
130 | { |
131 | } | |
132 | ||
349c9e13 BG |
133 | static struct ion_heap_ops ion_cma_ops = { |
134 | .allocate = ion_cma_allocate, | |
135 | .free = ion_cma_free, | |
349c9e13 BG |
136 | .map_user = ion_cma_mmap, |
137 | .map_kernel = ion_cma_map_kernel, | |
661f82f6 | 138 | .unmap_kernel = ion_cma_unmap_kernel, |
349c9e13 BG |
139 | }; |
140 | ||
141 | struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data) | |
142 | { | |
143 | struct ion_cma_heap *cma_heap; | |
144 | ||
145 | cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL); | |
146 | ||
147 | if (!cma_heap) | |
148 | return ERR_PTR(-ENOMEM); | |
149 | ||
150 | cma_heap->heap.ops = &ion_cma_ops; | |
7e416174 SR |
151 | /* |
152 | * get device from private heaps data, later it will be | |
153 | * used to make the link with reserved CMA memory | |
154 | */ | |
349c9e13 BG |
155 | cma_heap->dev = data->priv; |
156 | cma_heap->heap.type = ION_HEAP_TYPE_DMA; | |
157 | return &cma_heap->heap; | |
158 | } | |
159 | ||
160 | void ion_cma_heap_destroy(struct ion_heap *heap) | |
161 | { | |
162 | struct ion_cma_heap *cma_heap = to_cma_heap(heap); | |
163 | ||
164 | kfree(cma_heap); | |
165 | } |