/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

void *ion_heap_map_kernel(struct ion_heap *heap,
                          struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;

        if (!pages)
                return NULL;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
                           struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;
        int ret;

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg->length;

                if (offset >= sg->length) {
                        offset -= sg->length;
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg->length - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                      vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
        void *addr = vm_map_ram(pages, num, -1, pgprot);

        if (!addr)
                return -ENOMEM;
        memset(addr, 0, PAGE_SIZE * num);
        vm_unmap_ram(addr, num);

        return 0;
}

static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
                                pgprot_t pgprot)
{
        int p = 0;
        int ret = 0;
        struct sg_page_iter piter;
        struct page *pages[32];

        for_each_sg_page(sgl, &piter, nents, 0) {
                pages[p++] = sg_page_iter_page(&piter);
                if (p == ARRAY_SIZE(pages)) {
                        ret = ion_heap_clear_pages(pages, p, pgprot);
                        if (ret)
                                return ret;
                        p = 0;
                }
        }
        if (p)
                ret = ion_heap_clear_pages(pages, p, pgprot);

        return ret;
}

int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        pgprot_t pgprot;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

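/* Zero a physically contiguous run of pages via a one-entry scatterlist. */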
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, size, 0);
        return ion_heap_sglist_zero(&sg, 1, pgprot);
}

void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
        spin_lock(&heap->free_lock);
        list_add(&buffer->list, &heap->free_list);
        heap->free_list_size += buffer->size;
        spin_unlock(&heap->free_lock);
        wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
        size_t size;

        spin_lock(&heap->free_lock);
        size = heap->free_list_size;
        spin_unlock(&heap->free_lock);

        return size;
}

static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
                                       bool skip_pools)
{
        struct ion_buffer *buffer;
        size_t total_drained = 0;

        if (ion_heap_freelist_size(heap) == 0)
                return 0;

        spin_lock(&heap->free_lock);
        if (size == 0)
                size = heap->free_list_size;

        while (!list_empty(&heap->free_list)) {
                if (total_drained >= size)
                        break;
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                if (skip_pools)
                        buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
                total_drained += buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
                spin_lock(&heap->free_lock);
        }
        spin_unlock(&heap->free_lock);

        return total_drained;
}

size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, false);
}

size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, true);
}

static int ion_heap_deferred_free(void *data)
{
        struct ion_heap *heap = data;

        while (true) {
                struct ion_buffer *buffer;

                wait_event_freezable(heap->waitqueue,
                                     ion_heap_freelist_size(heap) > 0);

                spin_lock(&heap->free_lock);
                if (list_empty(&heap->free_list)) {
                        spin_unlock(&heap->free_lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
        }

        return 0;
}

int ion_heap_init_deferred_free(struct ion_heap *heap)
{
        struct sched_param param = { .sched_priority = 0 };

        INIT_LIST_HEAD(&heap->free_list);
        init_waitqueue_head(&heap->waitqueue);
        heap->task = kthread_run(ion_heap_deferred_free, heap,
                                 "%s", heap->name);
        if (IS_ERR(heap->task)) {
                pr_err("%s: creating thread for deferred free failed\n",
                       __func__);
                return PTR_ERR_OR_ZERO(heap->task);
        }
        sched_setscheduler(heap->task, SCHED_IDLE, &param);
        return 0;
}

static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
                                           struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int total = 0;

        total = ion_heap_freelist_size(heap) / PAGE_SIZE;
        if (heap->ops->shrink)
                total += heap->ops->shrink(heap, sc->gfp_mask, 0);
        return total;
}

static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
                                          struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int freed = 0;
        int to_scan = sc->nr_to_scan;

        if (to_scan == 0)
                return 0;

        /*
         * shrink the free list first, no point in zeroing the memory if we're
         * just going to reclaim it. Also, skip any possible page pooling.
         */
        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
                                PAGE_SIZE;

        to_scan -= freed;
        if (to_scan <= 0)
                return freed;

        if (heap->ops->shrink)
                freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
        return freed;
}

void ion_heap_init_shrinker(struct ion_heap *heap)
{
        heap->shrinker.count_objects = ion_heap_shrink_count;
        heap->shrinker.scan_objects = ion_heap_shrink_scan;
        heap->shrinker.seeks = DEFAULT_SEEKS;
        heap->shrinker.batch = 0;
        register_shrinker(&heap->shrinker);
}

struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_heap *heap = NULL;

        switch (heap_data->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                heap = ion_system_contig_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                heap = ion_system_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                heap = ion_carveout_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CHUNK:
                heap = ion_chunk_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_DMA:
                heap = ion_cma_heap_create(heap_data);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap_data->type);
                return ERR_PTR(-EINVAL);
        }

        if (IS_ERR_OR_NULL(heap)) {
                pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
                       __func__, heap_data->name, heap_data->type,
                       heap_data->base, heap_data->size);
                return ERR_PTR(-EINVAL);
        }

        heap->name = heap_data->name;
        heap->id = heap_data->id;
        return heap;
}
EXPORT_SYMBOL(ion_heap_create);

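/* ion_heap_destroy - dispatch to the type-specific destructor; NULL is a no-op */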
void ion_heap_destroy(struct ion_heap *heap)
{
        if (!heap)
                return;

        switch (heap->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                ion_system_contig_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                ion_system_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                ion_carveout_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CHUNK:
                ion_chunk_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_DMA:
                ion_cma_heap_destroy(heap);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap->type);
        }
}
EXPORT_SYMBOL(ion_heap_destroy);