/*
 * drivers/staging/android/ion/ion_page_pool.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include "ion_priv.h"

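/*
 * Allocate a fresh compound page of pool->order from the page allocator
 * using the pool's gfp mask.  For uncached pools the new page is synced
 * for device access before it is handed out.  Returns NULL on failure.
 */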
static struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
        struct page *page = alloc_pages(pool->gfp_mask, pool->order);

        if (!page)
                return NULL;
        if (!pool->cached)
                ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
                                          DMA_BIDIRECTIONAL);
        return page;
}

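/* Release a pool->order page back to the page allocator. */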
static void ion_page_pool_free_pages(struct ion_page_pool *pool,
                                     struct page *page)
{
        __free_pages(page, pool->order);
}

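/*
 * Stash a page in the pool, keeping highmem and lowmem pages on separate
 * lists so callers can decide which kind to hand out or reclaim first.
 * Always returns 0.
 */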
static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
{
        mutex_lock(&pool->mutex);
        if (PageHighMem(page)) {
                list_add_tail(&page->lru, &pool->high_items);
                pool->high_count++;
        } else {
                list_add_tail(&page->lru, &pool->low_items);
                pool->low_count++;
        }
        mutex_unlock(&pool->mutex);
        return 0;
}

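/*
 * Take the oldest page off the requested (highmem or lowmem) list.
 * The caller must hold pool->mutex and guarantee the list is non-empty.
 */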
static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
{
        struct page *page;

        if (high) {
                BUG_ON(!pool->high_count);
                page = list_first_entry(&pool->high_items, struct page, lru);
                pool->high_count--;
        } else {
                BUG_ON(!pool->low_count);
                page = list_first_entry(&pool->low_items, struct page, lru);
                pool->low_count--;
        }

        list_del(&page->lru);
        return page;
}

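/*
 * Hand out a page from the pool, preferring pooled highmem pages, and
 * fall back to a fresh allocation when the pool is empty.
 */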
struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
{
        struct page *page = NULL;

        BUG_ON(!pool);

        mutex_lock(&pool->mutex);
        if (pool->high_count)
                page = ion_page_pool_remove(pool, true);
        else if (pool->low_count)
                page = ion_page_pool_remove(pool, false);
        mutex_unlock(&pool->mutex);

        if (!page)
                page = ion_page_pool_alloc_pages(pool);

        return page;
}

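/*
 * Return a page to the pool.  If the page cannot be pooled it is freed
 * back to the page allocator instead.
 */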
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
{
        int ret;

        BUG_ON(pool->order != compound_order(page));

        ret = ion_page_pool_add(pool, page);
        if (ret)
                ion_page_pool_free_pages(pool, page);
}

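/*
 * Pool size in base (order-0) pages.  Highmem pages are only counted
 * when @high is true.
 */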
static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
{
        int count = pool->low_count;

        if (high)
                count += pool->high_count;

        return count << pool->order;
}

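/*
 * Shrinker helper.  With @nr_to_scan == 0 this only reports how many
 * base pages could be reclaimed for the given @gfp_mask.  Otherwise it
 * drains the pool (lowmem first, highmem only when __GFP_HIGHMEM is set
 * in @gfp_mask or the caller is kswapd) until @nr_to_scan base pages
 * have been freed or the pool is empty, and returns the number freed.
 */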
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
                         int nr_to_scan)
{
        int freed = 0;
        bool high;

        if (current_is_kswapd())
                high = true;
        else
                high = !!(gfp_mask & __GFP_HIGHMEM);

        if (nr_to_scan == 0)
                return ion_page_pool_total(pool, high);

        while (freed < nr_to_scan) {
                struct page *page;

                mutex_lock(&pool->mutex);
                if (pool->low_count) {
                        page = ion_page_pool_remove(pool, false);
                } else if (high && pool->high_count) {
                        page = ion_page_pool_remove(pool, true);
                } else {
                        mutex_unlock(&pool->mutex);
                        break;
                }
                mutex_unlock(&pool->mutex);
                ion_page_pool_free_pages(pool, page);
                freed += (1 << pool->order);
        }

        return freed;
}

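/*
 * Create a pool that caches pages of @order allocated with @gfp_mask.
 * __GFP_COMP is added so the compound_order() check in ion_page_pool_free()
 * holds for order > 0.  @cached marks the pool as holding CPU-cached pages,
 * which skips the cache sync in ion_page_pool_alloc_pages().
 */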
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
                                           bool cached)
{
        struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);

        if (!pool)
                return NULL;
        pool->high_count = 0;
        pool->low_count = 0;
        INIT_LIST_HEAD(&pool->low_items);
        INIT_LIST_HEAD(&pool->high_items);
        pool->gfp_mask = gfp_mask | __GFP_COMP;
        pool->order = order;
        mutex_init(&pool->mutex);
        plist_node_init(&pool->list, order);
        /* kmalloc() does not zero the struct, so always set cached */
        pool->cached = cached;

        return pool;
}

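/*
 * Free the pool bookkeeping structure.  Pages still held on its lists
 * are not freed here.
 */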
void ion_page_pool_destroy(struct ion_page_pool *pool)
{
        kfree(pool);
}

static int __init ion_page_pool_init(void)
{
        return 0;
}
device_initcall(ion_page_pool_init);
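
/*
 * Illustrative usage sketch (not part of this file): a heap such as
 * ion_system_heap keeps one pool per allocation order and does roughly
 * the following.  The gfp flags and the shrink-from-a-shrinker pattern
 * below are an assumption for illustration, not a prescription.
 *
 *      struct ion_page_pool *pool;
 *      struct page *page;
 *
 *      pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 4, false);
 *      if (!pool)
 *              return -ENOMEM;
 *
 *      page = ion_page_pool_alloc(pool);        // reuse a pooled page or allocate
 *      ...
 *      ion_page_pool_free(pool, page);          // give it back to the cache
 *
 *      // shrinker callback: query first, then reclaim
 *      nr = ion_page_pool_shrink(pool, sc->gfp_mask, 0);
 *      ion_page_pool_shrink(pool, sc->gfp_mask, sc->nr_to_scan);
 *
 *      ion_page_pool_destroy(pool);
 */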