drivers/base/dma-contiguous.c
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
#include <linux/log2.h>

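/*
 * One contiguous memory area managed by CMA: base_pfn and count describe
 * the reserved page range, bitmap tracks allocations at single-page
 * granularity (one bit per page), and lock protects the bitmap.
 */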
struct cma {
	unsigned long base_pfn;
	unsigned long count;
	unsigned long *bitmap;
	struct mutex lock;
};

struct cma *dma_contiguous_default_area;

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their
 * system should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
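
/*
 * Command-line syntax accepted by the parser above (sizes take the usual
 * K/M/G suffixes; the values below are purely illustrative):
 *
 *	cma=64M			reserve 64 MiB anywhere below the arch limit
 *	cma=64M@512M		reserve 64 MiB at exactly 512 MiB
 *	cma=64M@512M-1G		reserve 64 MiB somewhere between 512 MiB and 1 GiB
 */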

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
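
/*
 * Illustrative sketch (hypothetical function and limit names): the call
 * above is expected to come from early arch setup, after memblock is
 * usable but before other subsystems start allocating, roughly like:
 *
 *	void __init arch_memblock_init(void)
 *	{
 *		...
 *		dma_contiguous_reserve(arch_dma_zone_limit);
 *	}
 */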

static DEFINE_MUTEX(cma_mutex);

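/*
 * Activate a reserved area once the core allocators are up: allocate the
 * allocation bitmap and hand every pageblock of the range back to the page
 * allocator as MIGRATE_CMA via init_cma_reserved_pageblock(). The whole
 * range must lie within a single zone, as required by alloc_contig_range().
 */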
static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;
		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;

err:
	kfree(cma->bitmap);
	return -EINVAL;
}

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);
		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

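/*
 * Worker for dma_contiguous_reserve_area(): enforces a minimum alignment of
 * max(MAX_ORDER - 1, pageblock_order) pages, reserves the physical range
 * from memblock (exactly at @base when @fixed, otherwise anywhere in
 * [@base, @limit)), and records the area in cma_areas[] so that
 * cma_init_reserved_areas() can activate it later.
 */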
static int __init __dma_contiguous_reserve_area(phys_addr_t size,
				phys_addr_t base, phys_addr_t limit,
				phys_addr_t alignment,
				struct cma **res_cma, bool fixed)
{
	struct cma *cma = &cma_areas[cma_area_count];
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
		 __func__, (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit, (unsigned long)alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm; in that case, contiguous memory could no longer be
	 * allocated from the area, which is not what we want.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base && fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
							limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	*res_cma = cma;
	cma_area_count++;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);
	return 0;

err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creating custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = __dma_contiguous_reserve_area(size, base, limit, 0,
					    res_cma, fixed);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(PFN_PHYS((*res_cma)->base_pfn),
				   (*res_cma)->count << PAGE_SHIFT);

	return 0;
}
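
/*
 * Illustrative sketch (hypothetical platform code): reserve a 32 MiB area
 * for one device and attach it, assuming dev_set_cma_area() from
 * <linux/dma-contiguous.h> is available on the platform:
 *
 *	struct cma *cma;
 *
 *	if (!dma_contiguous_reserve_area(SZ_32M, 0, 0, &cma, false))
 *		dev_set_cma_area(dev, cma);
 */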

static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	mutex_unlock(&cma->lock);
}

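/*
 * Core of the allocator: find @count free pages in the bitmap aligned to
 * (1 << @align) pages, mark them used, and migrate them out of the way with
 * alloc_contig_range(). On -EBUSY the pages are unmarked and the search
 * retries further into the area; any other error gives up.
 */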
static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
						unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	for (;;) {
		mutex_lock(&cma->lock);
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, pageno, count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + pageno;
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			clear_cma_bitmap(cma, pfn, count);
			break;
		}
		clear_cma_bitmap(cma, pfn, count);
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev: Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	struct cma *cma = dev_get_cma_area(dev);

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return __dma_alloc_from_contiguous(cma, count, align);
}
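
/*
 * Illustrative sketch (hypothetical caller; in practice the per-arch DMA
 * mapping code is the main user): allocate and later release a 1 MiB
 * buffer for a device, assuming 4 KiB pages:
 *
 *	int count = SZ_1M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, count, get_order(SZ_1M));
 *	if (page) {
 *		... use the buffer ...
 *		dma_release_from_contiguous(dev, page, count);
 *	}
 */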

static bool __dma_release_from_contiguous(struct cma *cma, struct page *pages,
					  int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	clear_cma_bitmap(cma, pfn, count);

	return true;
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to a contiguous
 * area, and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);

	return __dma_release_from_contiguous(cma, pages, count);
}