memblock: s/memblock_analyze()/memblock_allow_resize()/ and update users
[deliverable/linux.git] / mm / memblock.c
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static int memblock_can_resize __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
					  phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	/* In case a huge size is requested */
	if (end < size)
		return 0;

	base = round_down(end - size, align);

	/* Prevent allocations returning 0 as it's also used to
	 * indicate an allocation failure
	 */
	if (start == 0)
		start = PAGE_SIZE;

	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = round_down(res_base - size, align);
	}

	return 0;
}

/*
 * Find a free area with specified alignment in a specific range.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, phys_addr_t end,
					phys_addr_t size, phys_addr_t align)
{
	long i;

	BUG_ON(0 == size);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found)
			return found;
	}
	return 0;
}

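/*
 * Illustrative sketch (not part of the original file): a caller that
 * wants 1MB of free space below 4GB, 1MB-aligned, might do:
 *
 *	phys_addr_t addr;
 *
 *	addr = memblock_find_in_range(0, 0x100000000ULL, 0x100000, 0x100000);
 *	if (addr)
 *		memblock_reserve(addr, 0x100000);
 *
 * The find is only a query; nothing is claimed until the caller
 * reserves the range itself.
 */
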
/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_reserve(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else
		addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail ! */
	BUG_ON(memblock_reserve(addr, new_size));

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyways
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}

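/*
 * Illustrative note (not in the original file): resizing only kicks in
 * once the architecture has called memblock_allow_resize(), typically
 * after all no-go areas are reserved. Assuming INIT_MEMBLOCK_REGIONS is
 * 128, the 129th distinct region would trigger memblock_double_array()
 * and the static __initdata array is replaced by a 256-entry one carved
 * out of memblock itself (or kmalloc'd once slab is up).
 */
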
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next)) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size, int nid)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_region - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type. The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions. @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_region(struct memblock_type *type,
					       phys_addr_t base, phys_addr_t size)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice. Once with %false @insert and
	 * then with %true. The first counts the number of regions needed
	 * to accommodate the new area. The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps. If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, MAX_NUMNODES);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       MAX_NUMNODES);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

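/*
 * Illustrative sketch (not in the original file): adding [8-24) when
 * the type already holds [0-16) runs the loop twice. Pass one (insert
 * == false) counts one new region for the uncovered tail [16-24); pass
 * two inserts it; memblock_merge_regions() then folds [0-16) and
 * [16-24) into a single [0-24) entry.
 */
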
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

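/*
 * Illustrative sketch (not in the original file): early arch setup code
 * typically feeds memblock before anything is allocated, e.g.:
 *
 *	memblock_add(ram_base, ram_size);
 *	memblock_reserve(__pa(_stext), _end - _stext);
 *	memblock_allow_resize();
 *
 * ram_base/ram_size are hypothetical values discovered from firmware;
 * the kernel image itself is reserved so allocations never land on it.
 */
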
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size). Crossing regions are split at the boundaries,
 * which may create at most two more regions. The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below. Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn));
		} else if (rend > end) {
			/*
			 * @rgn intersects from above. Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn));
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}

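/*
 * Illustrative sketch (not in the original file): isolating [8-16) from
 * a single region [0-32) splits it into [0-8), [8-16) and [16-32);
 * *start_rgn/*end_rgn then bracket exactly the middle entry, which
 * callers such as __memblock_remove() and memblock_set_node() operate
 * on without disturbing the two outer halves.
 */
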
static int __init_memblock __memblock_remove(struct memblock_type *type,
					     phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);

	return __memblock_remove(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);
	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

/**
 * __next_free_mem_range - next function for for_each_free_mem_range()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first free area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration. The lower 32bit of
 * *@idx contains index into memory region and the upper 32bit indexes the
 * areas before each reserved region. For example, if reserved regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_free_mem_range(u64 *idx, int nid,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	for ( ; mi < mem->cnt; mi++) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri < rsv->cnt + 1; ri++) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);
				/*
				 * The region which ends first is advanced
				 * for the next iteration.
				 */
				if (m_end <= r_end)
					mi++;
				else
					ri++;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

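/*
 * Illustrative sketch (not in the original file): callers normally use
 * the for_each_free_mem_range() wrapper from <linux/memblock.h> instead
 * of driving the iterator by hand:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
 *		pr_info("free: [%llx-%llx)\n",
 *			(unsigned long long)start,
 *			(unsigned long long)end);
 *
 * Each iteration yields one intersection of a memory region with a gap
 * between reserved regions.
 */
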
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	struct memblock_type *type = &memblock.memory;
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		type->regions[i].nid = nid;

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

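/*
 * Illustrative sketch (not in the original file): NUMA init code can
 * tag ranges per node as they are discovered, e.g.:
 *
 *	memblock_set_node(node_base, node_size, nid);
 *
 * The isolate-then-merge sequence above guarantees nid boundaries land
 * exactly on region boundaries, regardless of how the existing regions
 * happen to be laid out.
 */
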
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_in_range(0, max_addr, size, align);
	if (found && !memblock_reserve(found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

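/*
 * Illustrative sketch (not in the original file): an early page-table
 * allocator might grab a zeroed page like this:
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	void *va = __va(pa);
 *
 *	memset(va, 0, PAGE_SIZE);
 *
 * memblock_alloc() panics (via memblock_alloc_base()) instead of
 * returning 0 on failure, so no error check is needed here.
 */
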
/*
 * Additional node-local top-down allocators.
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

static phys_addr_t __init memblock_nid_range_rev(phys_addr_t start,
						 phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, nid)
		if (end > PFN_PHYS(start_pfn) && end <= PFN_PHYS(end_pfn))
			return max(start, PFN_PHYS(start_pfn));
#endif
	*nid = 0;
	return start;
}

phys_addr_t __init memblock_find_in_range_node(phys_addr_t start,
					       phys_addr_t end,
					       phys_addr_t size,
					       phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	for (i = mem->cnt - 1; i >= 0; i--) {
		struct memblock_region *r = &mem->regions[i];
		phys_addr_t base = max(start, r->base);
		phys_addr_t top = min(end, r->base + r->size);

		while (base < top) {
			phys_addr_t tbase, ret;
			int tnid;

			tbase = memblock_nid_range_rev(base, top, &tnid);
			if (nid == MAX_NUMNODES || tnid == nid) {
				ret = memblock_find_region(tbase, top, size, align);
				if (ret)
					return ret;
			}
			top = tbase;
		}
	}

	return 0;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t found;

	/*
	 * We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
					    size, align, nid);
	if (found && !memblock_reserve(found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

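/*
 * Illustrative sketch (not in the original file): per-node early
 * allocations prefer local memory but must not fail outright, so
 * callers typically use the _try_nid variant:
 *
 *	phys_addr_t pa = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, nid);
 *
 * If node nid has no suitable free range, the allocation silently
 * falls back to any accessible memory.
 */
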
/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	unsigned long i;
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;

	if (!limit)
		return;

	/* find out max address */
	for (i = 0; i < memblock.memory.cnt; i++) {
		struct memblock_region *r = &memblock.memory.regions[i];

		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	__memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
	__memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

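/*
 * Illustrative note (not in the original file): because region arrays
 * are kept sorted and non-overlapping, this is a plain binary search;
 * e.g. with regions [0-16), [32-48), looking up addr 40 narrows
 * left/right to the second entry and returns index 1, while addr 20
 * falls in the gap and returns -1.
 */
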
int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}


void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
			name, i, base, base + size - 1, size, nid_buf);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

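/*
 * Illustrative note (not in the original file): booting with
 * "memblock=debug" on the kernel command line sets memblock_debug,
 * which makes the memblock_dbg() calls above emit a log line for every
 * reserve and free during early boot.
 */
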
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

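/*
 * Illustrative note (not in the original file): with debugfs mounted,
 * the region arrays can be inspected at runtime, e.g.:
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000000001000..0x000000007fffffff
 *
 * (hypothetical output; one "index: start..end" line per region, as
 * produced by memblock_debug_show() above)
 */
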
#endif /* CONFIG_DEBUG_FS */