/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
#include <linux/log2.h>
struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	struct mutex	lock;
};

struct cma *dma_contiguous_default_area;
#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif
/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of global CMA area for their system
 * should use cma= kernel parameter.
 */
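/*
 * Example (editor's sketch, not part of the original file): given the
 * parser in early_cma() below, all of these forms should be accepted,
 * with suffix handling coming from memparse():
 *
 *	cma=64M				size only, placed anywhere
 *	cma=64M@0x20000000		size at a fixed base address
 *	cma=64M@0x20000000-0x30000000	size placed within [base, limit)
 */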
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;
static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif
/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
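/*
 * Usage sketch (editor's note, hedged): an architecture typically calls
 * dma_contiguous_reserve() from its early memory setup, after memblock is
 * ready but before other subsystems grab memory. On ARM of this era the
 * call sits in arm_memblock_init(), roughly:
 *
 *	void __init arm_memblock_init(...)
 *	{
 *		...
 *		dma_contiguous_reserve(arm_dma_limit);
 *		...
 *	}
 */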
static DEFINE_MUTEX(cma_mutex);
static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;
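
	/*
	 * Worked example (editor's note): for a 16 MiB area with 4 KiB
	 * pages, cma->count is 4096, so bitmap_size is
	 * BITS_TO_LONGS(4096) * sizeof(long) = 64 * 8 = 512 bytes on a
	 * 64-bit machine, and assuming pageblock_order == 10 the area
	 * spans i = 4096 >> 10 = 4 pageblocks.
	 */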
	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;

err:
	kfree(cma->bitmap);
	return -EINVAL;
}
static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);
		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);
static int __init __dma_contiguous_reserve_area(phys_addr_t size,
					phys_addr_t base, phys_addr_t limit,
					phys_addr_t alignment,
					struct cma **res_cma, bool fixed)
{
	struct cma *cma = &cma_areas[cma_area_count];
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
		 __func__, (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit, (unsigned long)alignment);

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm; in that case a contiguous allocation could fail, which
	 * is not what we want.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);
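
	/*
	 * Worked example (editor's note): with 4 KiB pages, MAX_ORDER == 11
	 * and pageblock_order <= 10, the enforced minimum alignment is
	 * PAGE_SIZE << 10 = 4 MiB, so base, size and limit are all rounded
	 * to 4 MiB boundaries before any memory is reserved.
	 */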
	/* Reserve memory */
	if (base && fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
							limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		}
		base = addr;
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	*res_cma = cma;
	cma_area_count++;

	pr_info("CMA: reserved %ld MiB at %08lx\n",
		(unsigned long)size / SZ_1M, (unsigned long)base);
	return 0;

err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows one to create custom reserved areas for
 * specific devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = __dma_contiguous_reserve_area(size, base, limit, 0,
					    res_cma, fixed);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(PFN_PHYS((*res_cma)->base_pfn),
				   (*res_cma)->count << PAGE_SHIFT);

	return 0;
}
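/*
 * Usage sketch (editor's note, illustrative; names are hypothetical):
 * platform code might carve out a dedicated 32 MiB area for one device
 * during early boot, e.g.:
 *
 *	static struct cma *camera_cma;
 *
 *	void __init board_reserve(void)
 *	{
 *		// 32 MiB anywhere below the 1 GiB boundary, not fixed
 *		dma_contiguous_reserve_area(SZ_32M, 0, SZ_1G,
 *					    &camera_cma, false);
 *	}
 *
 * The resulting area would then be attached to the device via the arch
 * specific dev_set_cma_area() helper.
 */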
static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	mutex_unlock(&cma->lock);
}
static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
						unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;
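
	/*
	 * Worked example (editor's note): for align == 4, mask is
	 * (1 << 4) - 1 = 0xf, so bitmap_find_next_zero_area() below only
	 * returns page offsets that are multiples of 16 pages, i.e. the
	 * allocation starts on a 64 KiB boundary with 4 KiB pages.
	 */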
	for (;;) {
		mutex_lock(&cma->lock);
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, pageno, count);
		/*
		 * It's safe to drop the lock here. We've marked this region
		 * for our exclusive use. If the migration fails we will take
		 * the lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + pageno;
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			clear_cma_bitmap(cma, pfn, count);
			break;
		}
		clear_cma_bitmap(cma, pfn, count);
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	struct cma *cma = dev_get_cma_area(dev);

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return __dma_alloc_from_contiguous(cma, count, align);
}
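/*
 * Usage sketch (editor's note, illustrative; drivers normally reach this
 * path indirectly via dma_alloc_coherent()): allocating a 1 MiB buffer
 * with 4 KiB pages means count = 256 pages, and align = 8 requests a
 * 2^8-page (1 MiB) alignment, capped by CONFIG_CMA_ALIGNMENT:
 *
 *	struct page *buf = dma_alloc_from_contiguous(dev, 256, 8);
 *	if (!buf)
 *		// fall back to another allocator
 */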
static bool __dma_release_from_contiguous(struct cma *cma, struct page *pages,
					  int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	clear_cma_bitmap(cma, pfn, count);

	return true;
}
/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);

	return __dma_release_from_contiguous(cma, pages, count);
}
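/*
 * Usage sketch (editor's note, illustrative): releasing the buffer from
 * the earlier dma_alloc_from_contiguous() example; count must match the
 * original allocation:
 *
 *	if (!dma_release_from_contiguous(dev, buf, 256))
 *		// pages did not come from this device's CMA area
 */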