/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.  June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
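/*
 * Note: each static array has one extra slot beyond INIT_MEMBLOCK_REGIONS.
 * memblock_init() writes a RED_INACTIVE poison marker into that last entry
 * and memblock_analyze() later checks it to detect array overruns.
 */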
/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
        if (type == &memblock.memory)
                return "memory";
        else if (type == &memblock.reserved)
                return "reserved";
        else
                return "unknown";
}
/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
                                       phys_addr_t base2, phys_addr_t size2)
{
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
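/*
 * Illustration: [0x1000, 0x3000) and [0x2000, 0x4000) overlap because
 * 0x1000 < 0x4000 and 0x2000 < 0x3000; two half-open ranges intersect
 * exactly when each base lies below the other range's end.
 */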
long __init_memblock memblock_overlaps_region(struct memblock_type *type,
                                              phys_addr_t base, phys_addr_t size)
{
        unsigned long i;

        for (i = 0; i < type->cnt; i++) {
                phys_addr_t rgnbase = type->regions[i].base;
                phys_addr_t rgnsize = type->regions[i].size;
                if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
                        break;
        }

        /* Return the index of the first overlapping region, or -1 if none */
        return (i < type->cnt) ? i : -1;
}
/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */
static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
                                          phys_addr_t size, phys_addr_t align)
{
        phys_addr_t base, res_base;
        long j;

        /* In case, huge size is requested */
        if (end < size)
                return 0;

        base = round_down(end - size, align);

        /* Prevent allocations returning 0 as it's also used to
         * indicate an allocation failure
         */
        if (start == 0)
                start = PAGE_SIZE;

        while (start <= base) {
                j = memblock_overlaps_region(&memblock.reserved, base, size);
                if (j < 0)
                        return base;
                res_base = memblock.reserved.regions[j].base;
                if (res_base < size)
                        break;
                base = round_down(res_base - size, align);
        }

        return 0;
}
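/*
 * The scan above works downward: start at the highest aligned candidate
 * below @end, and on each collision with a reserved region jump to just
 * below that region's base. E.g. with a reservation at [0x8000, 0x9800),
 * a 0x1000-byte request below 0xa000 first tries 0x9000 (collides), then
 * succeeds at 0x7000.
 */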
/*
 * Find a free area with specified alignment in a specific range.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, phys_addr_t end,
                                        phys_addr_t size, phys_addr_t align)
{
        long i;

        BUG_ON(0 == size);

        /* Pump up max_addr */
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
                end = memblock.current_limit;

        /* We do a top-down search, this tends to limit memory
         * fragmentation by keeping early boot allocs near the
         * top of memory
         */
        for (i = memblock.memory.cnt - 1; i >= 0; i--) {
                phys_addr_t memblockbase = memblock.memory.regions[i].base;
                phys_addr_t memblocksize = memblock.memory.regions[i].size;
                phys_addr_t bottom, top, found;

                if (memblocksize < size)
                        continue;
                if ((memblockbase + memblocksize) <= start)
                        break;
                bottom = max(memblockbase, start);
                top = min(memblockbase + memblocksize, end);
                if (bottom >= top)
                        continue;
                found = memblock_find_region(bottom, top, size, align);
                if (found)
                        return found;
        }
        return 0;
}
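/*
 * Illustrative call: a 64KiB area aligned to 4KiB anywhere below 1MiB:
 *
 *      phys_addr_t p = memblock_find_in_range(0, 0x100000, 0x10000, 0x1000);
 *
 * The search is top-down, so the highest suitable free address is returned
 * (0 on failure); the caller must still memblock_reserve() it to claim it.
 */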
/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
        /* Nothing to free if we still use the static init array */
        if (memblock.reserved.regions == memblock_reserved_init_regions)
                return 0;

        return memblock_free(__pa(memblock.reserved.regions),
                 sizeof(struct memblock_region) * memblock.reserved.max);
}
/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
        /* The static init array lives in the kernel image, nothing to do */
        if (memblock.reserved.regions == memblock_reserved_init_regions)
                return 0;

        return memblock_reserve(__pa(memblock.reserved.regions),
                 sizeof(struct memblock_region) * memblock.reserved.max);
}
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
        memmove(&type->regions[r], &type->regions[r + 1],
                (type->cnt - (r + 1)) * sizeof(type->regions[r]));
        type->cnt--;

        /* Special case for empty arrays */
        if (type->cnt == 0) {
                type->cnt = 1;
                type->regions[0].base = 0;
                type->regions[0].size = 0;
                memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
        }
}
/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);
static int __init_memblock memblock_double_array(struct memblock_type *type)
{
        struct memblock_region *new_array, *old_array;
        phys_addr_t old_size, new_size, addr;
        int use_slab = slab_is_available();

        /* We don't allow resizing until we know about the reserved regions
         * of memory that aren't suitable for allocation
         */
        if (!memblock_can_resize)
                return -1;

        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;

        /* Try to find some space for it.
         *
         * WARNING: We assume that either slab_is_available() and we use it or
         * we use MEMBLOCK for allocations. That means that this is unsafe to use
         * when bootmem is currently active (unless bootmem itself is implemented
         * on top of MEMBLOCK which isn't the case yet)
         *
         * This should however not be an issue for now, as we currently only
         * call into MEMBLOCK while it's still active, or much later when slab is
         * active for memory hotplug operations
         */
        if (use_slab) {
                new_array = kmalloc(new_size, GFP_KERNEL);
                addr = new_array ? __pa(new_array) : 0;
        } else
                addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
        if (!addr) {
                pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
                       memblock_type_name(type), type->max, type->max * 2);
                return -1;
        }
        new_array = __va(addr);

        memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
                 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

        /* Found space, we now need to move the array over before
         * we add the reserved region since it may be our reserved
         * array itself that is full.
         */
        memcpy(new_array, type->regions, old_size);
        memset(new_array + type->max, 0, old_size);
        old_array = type->regions;
        type->regions = new_array;
        type->max <<= 1;

        /* If we use SLAB that's it, we are done */
        if (use_slab)
                return 0;

        /* Add the new reserved region now. Should not fail ! */
        BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));

        /* If the array wasn't our static init one, then free it. We only do
         * that before SLAB is available as later on, we don't know whether
         * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
         * anyways.
         */
        if (old_array != memblock_memory_init_regions &&
            old_array != memblock_reserved_init_regions)
                memblock_free(__pa(old_array), old_size);

        return 0;
}
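/*
 * Growth is geometric: a full array of type->max entries is replaced by one
 * of 2 * type->max entries, so repeated additions cost amortised O(1) in
 * array space management regardless of how many regions accumulate.
 */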
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
        int i = 0;

        /* cnt never goes below 1 */
        while (i < type->cnt - 1) {
                struct memblock_region *this = &type->regions[i];
                struct memblock_region *next = &type->regions[i + 1];

                if (this->base + this->size != next->base ||
                    memblock_get_region_node(this) !=
                    memblock_get_region_node(next)) {
                        BUG_ON(this->base + this->size > next->base);
                        i++;
                        continue;
                }

                this->size += next->size;
                memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
                type->cnt--;
        }
}
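/*
 * For example, two same-node regions [0x0, 0x1000) and [0x1000, 0x2000)
 * are adjacent and compatible, so they collapse into one [0x0, 0x2000)
 * entry; a gap between them or differing node IDs keeps them separate.
 */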
/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
                                                   int idx, phys_addr_t base,
                                                   phys_addr_t size, int nid)
{
        struct memblock_region *rgn = &type->regions[idx];

        BUG_ON(type->cnt >= type->max);
        memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
        rgn->base = base;
        rgn->size = size;
        memblock_set_region_node(rgn, nid);
        type->cnt++;
}
/**
 * memblock_add_region - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type. The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions. @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static long __init_memblock memblock_add_region(struct memblock_type *type,
                                                phys_addr_t base, phys_addr_t size)
{
        bool insert = false;
        phys_addr_t obase = base, end = base + size;
        int i, nr_new;

        /* special case for empty array */
        if (type->regions[0].size == 0) {
                WARN_ON(type->cnt != 1);
                type->regions[0].base = base;
                type->regions[0].size = size;
                memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
                return 0;
        }
repeat:
        /*
         * The following is executed twice. Once with %false @insert and
         * then with %true. The first counts the number of regions needed
         * to accommodate the new area. The second actually inserts them.
         */
        base = obase;
        nr_new = 0;

        for (i = 0; i < type->cnt; i++) {
                struct memblock_region *rgn = &type->regions[i];
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;
                /*
                 * @rgn overlaps. If it separates the lower part of new
                 * area, insert that portion.
                 */
                if (rbase > base) {
                        nr_new++;
                        if (insert)
                                memblock_insert_region(type, i++, base,
                                                       rbase - base, MAX_NUMNODES);
                }
                /* area below @rend is dealt with, forget about it */
                base = min(rend, end);
        }

        /* insert the remaining portion */
        if (base < end) {
                nr_new++;
                if (insert)
                        memblock_insert_region(type, i, base, end - base,
                                               MAX_NUMNODES);
        }

        /*
         * If this was the first round, resize array and repeat for actual
         * insertions; otherwise, merge and return.
         */
        if (!insert) {
                while (type->cnt + nr_new > type->max)
                        if (memblock_double_array(type) < 0)
                                return -ENOMEM;
                insert = true;
                goto repeat;
        } else {
                memblock_merge_regions(type);
                return 0;
        }
}
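/*
 * Worked example of the two passes: adding [0x1000, 0x5000) over existing
 * regions [0x2000, 0x3000) and [0x4000, 0x4800) needs three new entries
 * ([0x1000, 0x2000), [0x3000, 0x4000) and [0x4800, 0x5000)). Pass one only
 * counts them so the array can be grown safely; pass two inserts them, and
 * memblock_merge_regions() then coalesces the run into one [0x1000, 0x5000).
 */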
long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
        return memblock_add_region(&memblock.memory, base, size);
}
static long __init_memblock __memblock_remove(struct memblock_type *type,
                                              phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size;
        int i;

        /* Walk through the array for collisions */
        for (i = 0; i < type->cnt; i++) {
                struct memblock_region *rgn = &type->regions[i];
                phys_addr_t rend = rgn->base + rgn->size;

                /* Nothing more to do, exit */
                if (rgn->base > end || rgn->size == 0)
                        break;

                /* If we fully enclose the block, drop it */
                if (base <= rgn->base && end >= rend) {
                        memblock_remove_region(type, i--);
                        continue;
                }

                /* If we are fully enclosed within a block
                 * then we need to split it and we are done
                 */
                if (base > rgn->base && end < rend) {
                        rgn->size = base - rgn->base;
                        if (!memblock_add_region(type, end, rend - end))
                                return 0;
                        /* Failure to split is bad, we at least
                         * restore the block before erroring
                         */
                        rgn->size = rend - rgn->base;
                        WARN_ON(1);
                        return -1;
                }

                /* Check if we need to trim the bottom of a block */
                if (rgn->base < end && rend > end) {
                        rgn->size -= end - rgn->base;
                        rgn->base = end;
                        break;
                }

                /* And check if we need to trim the top of a block */
                if (base < rend)
                        rgn->size -= rend - base;
        }
        return 0;
}
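/*
 * The loop above distinguishes four overlap cases: the removed range can
 * swallow a region whole, sit strictly inside one (forcing a split), clip
 * a region's bottom, or clip its top. E.g. removing [0x2000, 0x3000) from
 * [0x1000, 0x4000) splits it into [0x1000, 0x2000) and [0x3000, 0x4000).
 */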
long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
        return __memblock_remove(&memblock.memory, base, size);
}
long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
        memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
                     base, base + size, (void *)_RET_IP_);

        return __memblock_remove(&memblock.reserved, base, size);
}
long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
        struct memblock_type *_rgn = &memblock.reserved;

        memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
                     base, base + size, (void *)_RET_IP_);
        BUG_ON(0 == size);

        return memblock_add_region(_rgn, base, size);
}
/**
 * __next_free_mem_range - next function for for_each_free_mem_range()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first free area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration. The lower 32bit of
 * *@idx contains index into memory region and the upper 32bit indexes the
 * areas before each reserved region. For example, if reserved regions
 * look like the following,
 *
 *      0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *      0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_free_mem_range(u64 *idx, int nid,
                                           phys_addr_t *out_start,
                                           phys_addr_t *out_end, int *out_nid)
{
        struct memblock_type *mem = &memblock.memory;
        struct memblock_type *rsv = &memblock.reserved;
        int mi = *idx & 0xffffffff;
        int ri = *idx >> 32;

        for ( ; mi < mem->cnt; mi++) {
                struct memblock_region *m = &mem->regions[mi];
                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;

                /* only memory regions are associated with nodes, check it */
                if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
                        continue;

                /* scan areas before each reservation for intersection */
                for ( ; ri < rsv->cnt + 1; ri++) {
                        struct memblock_region *r = &rsv->regions[ri];
                        phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
                        phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

                        /* if ri advanced past mi, break out to advance mi */
                        if (r_start >= m_end)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_start < r_end) {
                                if (out_start)
                                        *out_start = max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = memblock_get_region_node(m);
                                /*
                                 * The region which ends first is advanced
                                 * for the next iteration.
                                 */
                                if (m_end <= r_end)
                                        mi++;
                                else
                                        ri++;
                                *idx = (u32)mi | (u64)ri << 32;
                                return;
                        }
                }
        }

        /* signal end of iteration */
        *idx = ULLONG_MAX;
}
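/*
 * Callers normally reach this through the for_each_free_mem_range()
 * iterator from <linux/memblock.h> rather than calling it directly;
 * a minimal sketch:
 *
 *      u64 i;
 *      phys_addr_t start, end;
 *
 *      for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
 *              pr_info("free: [%#016llx-%#016llx]\n",
 *                      (unsigned long long)start, (unsigned long long)end);
 */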
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
                                unsigned long *out_start_pfn,
                                unsigned long *out_end_pfn, int *out_nid)
{
        struct memblock_type *type = &memblock.memory;
        struct memblock_region *r;

        while (++*idx < type->cnt) {
                r = &type->regions[*idx];

                /* skip regions smaller than a full page */
                if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
                        continue;
                if (nid == MAX_NUMNODES || nid == r->nid)
                        break;
        }
        if (*idx >= type->cnt) {
                *idx = -1;
                return;
        }

        if (out_start_pfn)
                *out_start_pfn = PFN_UP(r->base);
        if (out_end_pfn)
                *out_end_pfn = PFN_DOWN(r->base + r->size);
        if (out_nid)
                *out_nid = r->nid;
}
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
                                      int nid)
{
        struct memblock_type *type = &memblock.memory;
        phys_addr_t end = base + size;
        int i;

        /* we'll create at most two more regions */
        while (type->cnt + 2 > type->max)
                if (memblock_double_array(type) < 0)
                        return -ENOMEM;

        for (i = 0; i < type->cnt; i++) {
                struct memblock_region *rgn = &type->regions[i];
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;

                if (rbase < base) {
                        /*
                         * @rgn intersects from below. Split and continue
                         * to process the next region - the new top half.
                         */
                        rgn->base = base;
                        rgn->size = rend - rgn->base;
                        memblock_insert_region(type, i, rbase, base - rbase,
                                               rgn->nid);
                } else if (rend > end) {
                        /*
                         * @rgn intersects from above. Split and redo the
                         * current region - the new bottom half.
                         */
                        rgn->base = end;
                        rgn->size = rend - rgn->base;
                        memblock_insert_region(type, i--, rbase, end - rbase,
                                               rgn->nid);
                } else {
                        /* @rgn is fully contained, set ->nid */
                        rgn->nid = nid;
                }
        }

        memblock_merge_regions(type);
        return 0;
}
#endif  /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        phys_addr_t found;

        /* We align the size to limit fragmentation. Without this, a lot of
         * small allocs quickly eat up the whole reserve array on sparc
         */
        size = round_up(size, align);

        found = memblock_find_in_range(0, max_addr, size, align);
        if (found && !memblock_add_region(&memblock.reserved, found, size))
                return found;

        return 0;
}
phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        phys_addr_t alloc;

        alloc = __memblock_alloc_base(size, align, max_addr);

        if (alloc == 0)
                panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
                      (unsigned long long) size, (unsigned long long) max_addr);

        return alloc;
}
phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
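/*
 * The allocator family nests: memblock_alloc() bounds the search by
 * memblock.current_limit, memblock_alloc_base() panics on failure, and
 * __memblock_alloc_base() is the non-panicking primitive. A hypothetical
 * early-boot caller wanting a page-aligned scratch buffer might do:
 *
 *      phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *      void *va = __va(pa);    (valid once the linear map covers pa)
 */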
/*
 * Additional node-local top-down allocators.
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

static phys_addr_t __init memblock_nid_range_rev(phys_addr_t start,
                                                 phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
        unsigned long start_pfn, end_pfn;
        int i;

        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, nid)
                if (end > PFN_PHYS(start_pfn) && end <= PFN_PHYS(end_pfn))
                        return max(start, PFN_PHYS(start_pfn));
#endif
        *nid = 0;
        return start;
}
phys_addr_t __init memblock_find_in_range_node(phys_addr_t start,
                                               phys_addr_t end,
                                               phys_addr_t size,
                                               phys_addr_t align, int nid)
{
        struct memblock_type *mem = &memblock.memory;
        int i;

        BUG_ON(0 == size);

        /* Pump up max_addr */
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
                end = memblock.current_limit;

        for (i = mem->cnt - 1; i >= 0; i--) {
                struct memblock_region *r = &mem->regions[i];
                phys_addr_t base = max(start, r->base);
                phys_addr_t top = min(end, r->base + r->size);

                /* walk the region in node-sized chunks, top-down */
                while (base < top) {
                        phys_addr_t tbase, ret;
                        int tnid;

                        tbase = memblock_nid_range_rev(base, top, &tnid);
                        if (nid == MAX_NUMNODES || tnid == nid) {
                                ret = memblock_find_region(tbase, top, size, align);
                                if (ret)
                                        return ret;
                        }
                        top = tbase;
                }
        }

        return 0;
}
phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        phys_addr_t found;

        /*
         * We align the size to limit fragmentation. Without this, a lot of
         * small allocs quickly eat up the whole reserve array on sparc
         */
        size = round_up(size, align);

        found = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
                                            size, align, nid);
        if (found && !memblock_add_region(&memblock.reserved, found, size))
                return found;

        return 0;
}
phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        phys_addr_t res = memblock_alloc_nid(size, align, nid);

        if (res)
                return res;
        /* Fall back to any node if the requested one has no space */
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
        return memblock.memory_size;
}
phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
        int idx = memblock.memory.cnt - 1;

        return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
        unsigned long i;
        phys_addr_t limit;
        struct memblock_region *p;

        if (!memory_limit)
                return;

        /* Truncate the memblock regions to satisfy the memory limit. */
        limit = memory_limit;
        for (i = 0; i < memblock.memory.cnt; i++) {
                if (limit > memblock.memory.regions[i].size) {
                        limit -= memblock.memory.regions[i].size;
                        continue;
                }

                memblock.memory.regions[i].size = limit;
                memblock.memory.cnt = i + 1;
                break;
        }

        memory_limit = memblock_end_of_DRAM();

        /* And truncate any reserves above the limit also. */
        for (i = 0; i < memblock.reserved.cnt; i++) {
                p = &memblock.reserved.regions[i];

                if (p->base > memory_limit)
                        p->size = 0;
                else if ((p->base + p->size) > memory_limit)
                        p->size = memory_limit - p->base;

                if (p->size == 0) {
                        memblock_remove_region(&memblock.reserved, i);
                        i--;
                }
        }
}
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
        unsigned int left = 0, right = type->cnt;

        do {
                unsigned int mid = (right + left) / 2;

                if (addr < type->regions[mid].base)
                        right = mid;
                else if (addr >= (type->regions[mid].base +
                                  type->regions[mid].size))
                        left = mid + 1;
                else
                        return mid;
        } while (left < right);
        return -1;
}
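/*
 * The binary search relies on the invariant that regions[] is kept sorted
 * by base and non-overlapping (memblock_add_region() guarantees this), so
 * membership of @addr can be decided in O(log cnt) probes.
 */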
int __init memblock_is_reserved(phys_addr_t addr)
{
        return memblock_search(&memblock.reserved, addr) != -1;
}
int __init_memblock memblock_is_memory(phys_addr_t addr)
{
        return memblock_search(&memblock.memory, addr) != -1;
}
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
        int idx = memblock_search(&memblock.memory, base);

        if (idx == -1)
                return 0;
        return memblock.memory.regions[idx].base <= base &&
                (memblock.memory.regions[idx].base +
                 memblock.memory.regions[idx].size) >= (base + size);
}
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
        return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}
void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
        memblock.current_limit = limit;
}
static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
        unsigned long long base, size;
        int i;

        pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

        for (i = 0; i < type->cnt; i++) {
                struct memblock_region *rgn = &type->regions[i];
                char nid_buf[32] = "";

                base = rgn->base;
                size = rgn->size;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
                if (memblock_get_region_node(rgn) != MAX_NUMNODES)
                        snprintf(nid_buf, sizeof(nid_buf), " on node %d",
                                 memblock_get_region_node(rgn));
#endif
                pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
                        name, i, base, base + size - 1, size, nid_buf);
        }
}
void __init_memblock memblock_dump_all(void)
{
        if (!memblock_debug)
                return;

        pr_info("MEMBLOCK configuration:\n");
        pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

        memblock_dump(&memblock.memory, "memory");
        memblock_dump(&memblock.reserved, "reserved");
}
void __init memblock_analyze(void)
{
        int i;

        /* Check marker in the unused last array entry */
        WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
                != (phys_addr_t)RED_INACTIVE);
        WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
                != (phys_addr_t)RED_INACTIVE);

        memblock.memory_size = 0;

        for (i = 0; i < memblock.memory.cnt; i++)
                memblock.memory_size += memblock.memory.regions[i].size;

        /* We allow resizing from there */
        memblock_can_resize = 1;
}
void __init memblock_init(void)
{
        static int init_done __initdata = 0;

        if (init_done)
                return;
        init_done = 1;

        /* Hookup the initial arrays */
        memblock.memory.regions = memblock_memory_init_regions;
        memblock.memory.max = INIT_MEMBLOCK_REGIONS;
        memblock.reserved.regions = memblock_reserved_init_regions;
        memblock.reserved.max = INIT_MEMBLOCK_REGIONS;

        /* Write a marker in the unused last array entry */
        memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
        memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

        /* Create a dummy zero size MEMBLOCK which will get coalesced away later.
         * This simplifies the memblock_add() code below...
         */
        memblock.memory.regions[0].base = 0;
        memblock.memory.regions[0].size = 0;
        memblock_set_region_node(&memblock.memory.regions[0], MAX_NUMNODES);
        memblock.memory.cnt = 1;

        /* Ditto. */
        memblock.reserved.regions[0].base = 0;
        memblock.reserved.regions[0].size = 0;
        memblock_set_region_node(&memblock.reserved.regions[0], MAX_NUMNODES);
        memblock.reserved.cnt = 1;

        memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}
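/*
 * Typical early-boot ordering, pieced together from the comments above:
 * memblock_init(), then memblock_add()/memblock_reserve() calls from the
 * architecture code, then memblock_analyze() (which checks the markers and
 * enables array resizing), and only then memblock_alloc() and friends.
 */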
static int __init early_memblock(char *p)
{
        if (p && strstr(p, "debug"))
                memblock_debug = 1;
        return 0;
}
early_param("memblock", early_memblock);
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
        struct memblock_type *type = m->private;
        struct memblock_region *reg;
        int i;

        for (i = 0; i < type->cnt; i++) {
                reg = &type->regions[i];
                seq_printf(m, "%4d: ", i);
                if (sizeof(phys_addr_t) == 4)
                        seq_printf(m, "0x%08lx..0x%08lx\n",
                                   (unsigned long)reg->base,
                                   (unsigned long)(reg->base + reg->size - 1));
                else
                        seq_printf(m, "0x%016llx..0x%016llx\n",
                                   (unsigned long long)reg->base,
                                   (unsigned long long)(reg->base + reg->size - 1));
        }
        return 0;
}
static int memblock_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, memblock_debug_show, inode->i_private);
}
static const struct file_operations memblock_debug_fops = {
        .open = memblock_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
static int __init memblock_init_debugfs(void)
{
        struct dentry *root = debugfs_create_dir("memblock", NULL);

        if (!root)
                return -ENXIO;
        debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
        debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

        return 0;
}
__initcall(memblock_init_debugfs);
#endif /* CONFIG_DEBUG_FS */