/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

#define NUM_ORDERS ARRAY_SIZE(orders)

static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
				     __GFP_NORETRY) & ~__GFP_RECLAIM;
static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO);
static const unsigned int orders[] = {8, 4, 0};

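/*
 * Map an allocation order to its index in the pool arrays below. Only
 * the orders listed in orders[] are valid; any other order is a driver
 * bug, hence the BUG().
 */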
static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static inline unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

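/*
 * The system heap keeps one page pool per order for each caching mode,
 * so cached and uncached pages are recycled separately and never mix.
 */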
struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool *uncached_pools[NUM_ORDERS];
	struct ion_page_pool *cached_pools[NUM_ORDERS];
};

/*
 * Pages from the page pool are all zeroed beforehand. We only need a
 * cache clean for cached buffers; uncached buffers have been non-cached
 * ever since allocation, so no maintenance is needed for their pages.
 */
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	struct ion_page_pool *pool;
	struct page *page;

	if (!cached)
		pool = heap->uncached_pools[order_to_index(order)];
	else
		pool = heap->cached_pools[order_to_index(order)];

	page = ion_page_pool_alloc(pool);
	if (!page)
		return NULL;

	if (cached)
		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
					  DMA_BIDIRECTIONAL);
	return page;
}

static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page)
{
	struct ion_page_pool *pool;
	unsigned int order = compound_order(page);
	bool cached = ion_buffer_cached(buffer);

	/*
	 * Shrinker-initiated frees bypass the pools and go straight
	 * back to the system.
	 */
	if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
		__free_pages(page, order);
		return;
	}

	if (!cached)
		pool = heap->uncached_pools[order_to_index(order)];
	else
		pool = heap->cached_pools[order_to_index(order)];

	ion_page_pool_free(pool, page);
}

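/*
 * Grab the largest chunk that still fits in the remaining size, capped
 * at max_order so an order that has already failed is never retried.
 */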
static struct page *alloc_largest_available(struct ion_system_heap *heap,
					    struct ion_buffer *buffer,
					    unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		return page;
	}

	return NULL;
}

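/*
 * Build the buffer from a mix of chunk sizes, largest first. Each
 * successful allocation lowers max_order to the order just obtained,
 * so the resulting scatterlist is sorted from large to small chunks.
 */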
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i = 0;
	unsigned long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];

	if (align > PAGE_SIZE)
		return -EINVAL;

	if (size / PAGE_SIZE > totalram_pages / 2)
		return -ENOMEM;

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		page = alloc_largest_available(sys_heap, buffer, size_remaining,
					       max_order);
		if (!page)
			goto free_pages;
		list_add_tail(&page->lru, &pages);
		size_remaining -= PAGE_SIZE << compound_order(page);
		max_order = compound_order(page);
		i++;
	}
	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto free_pages;

	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_table;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	buffer->sg_table = table;
	return 0;

free_table:
	kfree(table);
free_pages:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		free_buffer_page(sys_heap, buffer, page);
	return -ENOMEM;
}

static void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_system_heap *sys_heap = container_of(buffer->heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	int i;

	/* zero the buffer before returning the pages to the pool */
	if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg));
	sg_free_table(table);
	kfree(table);
}

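/*
 * Shrinker callback. A nr_to_scan of zero is a query: report how many
 * pages could be freed without actually freeing any. Otherwise drain
 * the uncached pool before the cached one at each order, stopping as
 * soon as the request is satisfied.
 */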
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
				  int nr_to_scan)
{
	struct ion_page_pool *uncached_pool;
	struct ion_page_pool *cached_pool;
	struct ion_system_heap *sys_heap;
	int nr_total = 0;
	int i, nr_freed;
	int only_scan = 0;

	sys_heap = container_of(heap, struct ion_system_heap, heap);

	if (!nr_to_scan)
		only_scan = 1;

	for (i = 0; i < NUM_ORDERS; i++) {
		uncached_pool = sys_heap->uncached_pools[i];
		cached_pool = sys_heap->cached_pools[i];

		if (only_scan) {
			nr_total += ion_page_pool_shrink(uncached_pool,
							 gfp_mask,
							 nr_to_scan);

			nr_total += ion_page_pool_shrink(cached_pool,
							 gfp_mask,
							 nr_to_scan);
		} else {
			nr_freed = ion_page_pool_shrink(uncached_pool,
							gfp_mask,
							nr_to_scan);
			nr_to_scan -= nr_freed;
			nr_total += nr_freed;
			if (nr_to_scan <= 0)
				break;
			nr_freed = ion_page_pool_shrink(cached_pool,
							gfp_mask,
							nr_to_scan);
			nr_to_scan -= nr_freed;
			nr_total += nr_freed;
			if (nr_to_scan <= 0)
				break;
		}
	}
	return nr_total;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
	.shrink = ion_system_heap_shrink,
};

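/*
 * debugfs hook: dump per-order pool occupancy, split by caching mode
 * and by highmem/lowmem.
 */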
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;
	struct ion_page_pool *pool;

	for (i = 0; i < NUM_ORDERS; i++) {
		pool = sys_heap->uncached_pools[i];

		seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
			   pool->high_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
			   pool->low_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->low_count);
	}

	for (i = 0; i < NUM_ORDERS; i++) {
		pool = sys_heap->cached_pools[i];

		seq_printf(s, "%d order %u highmem pages cached %lu total\n",
			   pool->high_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
			   pool->low_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->low_count);
	}
	return 0;
}

static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		if (pools[i])
			ion_page_pool_destroy(pools[i]);
}

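/*
 * Create one pool per order. Pools for orders above 4 allocate with
 * __GFP_NORETRY and no direct reclaim, so a failed high-order attempt
 * falls through quickly to the next smaller order.
 */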
static int ion_system_heap_create_pools(struct ion_page_pool **pools,
					bool cached)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;

		pool = ion_page_pool_create(gfp_flags, orders[i], cached);
		if (!pool)
			goto err_create_pool;
		pools[i] = pool;
	}
	return 0;

err_create_pool:
	ion_system_heap_destroy_pools(pools);
	return -ENOMEM;
}

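/*
 * The heap is created with ION_HEAP_FLAG_DEFER_FREE, so buffers are
 * handed to a worker thread for freeing instead of being torn down in
 * the caller's context.
 */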
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

	if (ion_system_heap_create_pools(heap->uncached_pools, false))
		goto free_heap;

	if (ion_system_heap_create_pools(heap->cached_pools, true))
		goto destroy_uncached_pools;

	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;

destroy_uncached_pools:
	ion_system_heap_destroy_pools(heap->uncached_pools);

free_heap:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		ion_page_pool_destroy(sys_heap->uncached_pools[i]);
		ion_page_pool_destroy(sys_heap->cached_pools[i]);
	}
	kfree(sys_heap);
}

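/*
 * The contig variant backs the whole buffer with a single physically
 * contiguous run of order-0 pages: allocate one high-order block, split
 * it, and give back the tail pages beyond the page-aligned length.
 */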
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	int order = get_order(len);
	struct page *page;
	struct sg_table *table;
	unsigned long i;
	int ret;

	if (align > (PAGE_SIZE << order))
		return -EINVAL;

	page = alloc_pages(low_order_gfp_flags, order);
	if (!page)
		return -ENOMEM;

	split_page(page, order);

	len = PAGE_ALIGN(len);
	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
		__free_page(page + i);

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto free_pages;
	}

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto free_table;

	sg_set_page(table->sgl, page, len, 0);

	buffer->sg_table = table;

	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);

	return 0;

free_table:
	kfree(table);
free_pages:
	for (i = 0; i < len >> PAGE_SHIFT; i++)
		__free_page(page + i);

	return ret;
}

static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	struct page *page = sg_page(table->sgl);
	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < pages; i++)
		__free_page(page + i);
	sg_free_table(table);
	kfree(table);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}