Diffstat (limited to 'mm/memblock.c')
-rw-r--r--  mm/memblock.c   961
1 file changed, 530 insertions(+), 431 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 84bec4969ed5..2f55f19b7c86 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -20,12 +20,23 @@
20#include <linux/seq_file.h> 20#include <linux/seq_file.h>
21#include <linux/memblock.h> 21#include <linux/memblock.h>
22 22
23struct memblock memblock __initdata_memblock; 23static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
24static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
25
26struct memblock memblock __initdata_memblock = {
27 .memory.regions = memblock_memory_init_regions,
28 .memory.cnt = 1, /* empty dummy entry */
29 .memory.max = INIT_MEMBLOCK_REGIONS,
30
31 .reserved.regions = memblock_reserved_init_regions,
32 .reserved.cnt = 1, /* empty dummy entry */
33 .reserved.max = INIT_MEMBLOCK_REGIONS,
34
35 .current_limit = MEMBLOCK_ALLOC_ANYWHERE,
36};
24 37
25int memblock_debug __initdata_memblock; 38int memblock_debug __initdata_memblock;
26int memblock_can_resize __initdata_memblock; 39static int memblock_can_resize __initdata_memblock;
27static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
28static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
29 40
30/* inline so we don't get a warning when pr_debug is compiled out */ 41/* inline so we don't get a warning when pr_debug is compiled out */
31static inline const char *memblock_type_name(struct memblock_type *type) 42static inline const char *memblock_type_name(struct memblock_type *type)
@@ -38,20 +49,15 @@ static inline const char *memblock_type_name(struct memblock_type *type)
38 return "unknown"; 49 return "unknown";
39} 50}
40 51
41/* 52/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
42 * Address comparison utilities 53static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
43 */
44
45static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size)
46{
47 return addr & ~(size - 1);
48}
49
50static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size)
51{ 54{
52 return (addr + (size - 1)) & ~(size - 1); 55 return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
53} 56}
54 57
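The hunk above drops the memblock_align_up()/memblock_align_down() helpers and adds memblock_cap_size(), which clamps *size so that base + *size cannot overflow phys_addr_t. A minimal user-space sketch of that clamp, with uint64_t standing in for phys_addr_t and cap_size() as an illustrative name rather than the kernel function:

#include <stdint.h>
#include <stdio.h>

/* Clamp *size so that base + *size cannot overflow a 64-bit address. */
static uint64_t cap_size(uint64_t base, uint64_t *size)
{
	uint64_t max = UINT64_MAX - base;

	if (*size > max)
		*size = max;
	return *size;
}

int main(void)
{
	uint64_t size = UINT64_MAX;	/* "everything above base" */
	uint64_t end = 0x1000 + cap_size(0x1000, &size);

	/* end is UINT64_MAX instead of wrapping around to 0xfff */
	printf("capped size=%#llx end=%#llx\n",
	       (unsigned long long)size, (unsigned long long)end);
	return 0;
}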
58/*
59 * Address comparison utilities
60 */
55static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, 61static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
56 phys_addr_t base2, phys_addr_t size2) 62 phys_addr_t base2, phys_addr_t size2)
57{ 63{
@@ -73,83 +79,66 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
73 return (i < type->cnt) ? i : -1; 79 return (i < type->cnt) ? i : -1;
74} 80}
75 81
76/* 82/**
77 * Find, allocate, deallocate or reserve unreserved regions. All allocations 83 * memblock_find_in_range_node - find free area in given range and node
78 * are top-down. 84 * @start: start of candidate range
85 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
86 * @size: size of free area to find
87 * @align: alignment of free area to find
88 * @nid: nid of the free area to find, %MAX_NUMNODES for any node
89 *
90 * Find @size free area aligned to @align in the specified range and node.
91 *
92 * RETURNS:
93 * Found address on success, %0 on failure.
79 */ 94 */
80 95phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
81static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end, 96 phys_addr_t end, phys_addr_t size,
82 phys_addr_t size, phys_addr_t align) 97 phys_addr_t align, int nid)
83{ 98{
84 phys_addr_t base, res_base; 99 phys_addr_t this_start, this_end, cand;
85 long j; 100 u64 i;
86
87 /* In case, huge size is requested */
88 if (end < size)
89 return MEMBLOCK_ERROR;
90
91 base = memblock_align_down((end - size), align);
92 101
93 /* Prevent allocations returning 0 as it's also used to 102 /* align @size to avoid excessive fragmentation on reserved array */
94 * indicate an allocation failure 103 size = round_up(size, align);
95 */
96 if (start == 0)
97 start = PAGE_SIZE;
98
99 while (start <= base) {
100 j = memblock_overlaps_region(&memblock.reserved, base, size);
101 if (j < 0)
102 return base;
103 res_base = memblock.reserved.regions[j].base;
104 if (res_base < size)
105 break;
106 base = memblock_align_down(res_base - size, align);
107 }
108 104
109 return MEMBLOCK_ERROR; 105 /* pump up @end */
110}
111
112static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
113 phys_addr_t align, phys_addr_t start, phys_addr_t end)
114{
115 long i;
116
117 BUG_ON(0 == size);
118
119 /* Pump up max_addr */
120 if (end == MEMBLOCK_ALLOC_ACCESSIBLE) 106 if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
121 end = memblock.current_limit; 107 end = memblock.current_limit;
122 108
123 /* We do a top-down search, this tends to limit memory 109 /* adjust @start to avoid underflow and allocating the first page */
124 * fragmentation by keeping early boot allocs near the 110 start = max3(start, size, (phys_addr_t)PAGE_SIZE);
125 * top of memory 111 end = max(start, end);
126 */
127 for (i = memblock.memory.cnt - 1; i >= 0; i--) {
128 phys_addr_t memblockbase = memblock.memory.regions[i].base;
129 phys_addr_t memblocksize = memblock.memory.regions[i].size;
130 phys_addr_t bottom, top, found;
131 112
132 if (memblocksize < size) 113 for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
133 continue; 114 this_start = clamp(this_start, start, end);
134 if ((memblockbase + memblocksize) <= start) 115 this_end = clamp(this_end, start, end);
135 break; 116
136 bottom = max(memblockbase, start); 117 cand = round_down(this_end - size, align);
137 top = min(memblockbase + memblocksize, end); 118 if (cand >= this_start)
138 if (bottom >= top) 119 return cand;
139 continue;
140 found = memblock_find_region(bottom, top, size, align);
141 if (found != MEMBLOCK_ERROR)
142 return found;
143 } 120 }
144 return MEMBLOCK_ERROR; 121 return 0;
145} 122}
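memblock_find_in_range_node() keeps the old top-down policy but now walks the free ranges produced by for_each_free_mem_range_reverse() and places each candidate at the highest aligned address that still fits, using clamp() and round_down(). A stand-alone sketch of that placement logic over a hard-coded list of free ranges; the helper names and the example layout are made up for illustration:

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };		/* [start, end), sorted ascending */

static uint64_t round_down_u64(uint64_t x, uint64_t align)
{
	return x & ~(align - 1);		/* align must be a power of two */
}

static uint64_t clamp_u64(uint64_t x, uint64_t lo, uint64_t hi)
{
	return x < lo ? lo : (x > hi ? hi : x);
}

/* Top-down: return the highest aligned block of @size inside [start, end). */
static uint64_t find_top_down(const struct range *avail, int nr,
			      uint64_t start, uint64_t end,
			      uint64_t size, uint64_t align)
{
	for (int i = nr - 1; i >= 0; i--) {
		uint64_t lo = clamp_u64(avail[i].start, start, end);
		uint64_t hi = clamp_u64(avail[i].end, start, end);

		if (hi < lo + size)		/* clamped range too small */
			continue;
		uint64_t cand = round_down_u64(hi - size, align);
		if (cand >= lo)
			return cand;
	}
	return 0;	/* 0 doubles as "not found", as in the kernel code */
}

int main(void)
{
	struct range avail[] = { { 0x1000, 0x8000 }, { 0x20000, 0x40000 } };

	/* request 8 KiB, 4 KiB aligned, anywhere below 0x30000 */
	printf("%#llx\n", (unsigned long long)
	       find_top_down(avail, 2, 0, 0x30000, 0x2000, 0x1000));
	return 0;
}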
146 123
147/* 124/**
148 * Find a free area with specified alignment in a specific range. 125 * memblock_find_in_range - find free area in given range
126 * @start: start of candidate range
127 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
128 * @size: size of free area to find
129 * @align: alignment of free area to find
130 *
131 * Find @size free area aligned to @align in the specified range.
132 *
133 * RETURNS:
134 * Found address on success, %0 on failure.
149 */ 135 */
150u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align) 136phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
137 phys_addr_t end, phys_addr_t size,
138 phys_addr_t align)
151{ 139{
152 return memblock_find_base(size, align, start, end); 140 return memblock_find_in_range_node(start, end, size, align,
141 MAX_NUMNODES);
153} 142}
154 143
155/* 144/*
@@ -178,25 +167,21 @@ int __init_memblock memblock_reserve_reserved_regions(void)
178 167
179static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) 168static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
180{ 169{
181 unsigned long i; 170 type->total_size -= type->regions[r].size;
182 171 memmove(&type->regions[r], &type->regions[r + 1],
183 for (i = r; i < type->cnt - 1; i++) { 172 (type->cnt - (r + 1)) * sizeof(type->regions[r]));
184 type->regions[i].base = type->regions[i + 1].base;
185 type->regions[i].size = type->regions[i + 1].size;
186 }
187 type->cnt--; 173 type->cnt--;
188 174
189 /* Special case for empty arrays */ 175 /* Special case for empty arrays */
190 if (type->cnt == 0) { 176 if (type->cnt == 0) {
177 WARN_ON(type->total_size != 0);
191 type->cnt = 1; 178 type->cnt = 1;
192 type->regions[0].base = 0; 179 type->regions[0].base = 0;
193 type->regions[0].size = 0; 180 type->regions[0].size = 0;
181 memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
194 } 182 }
195} 183}
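memblock_remove_region() now collapses the array with a single memmove() instead of copying entries field by field, and keeps the new total_size accounting in sync. The same pattern on a plain user-space array, using a toy region struct rather than the kernel one:

#include <stdio.h>
#include <string.h>

struct region { unsigned long base, size; };

static struct region regions[8] = {
	{ 0x0000, 0x1000 }, { 0x2000, 0x1000 }, { 0x5000, 0x3000 },
};
static int cnt = 3;
static unsigned long total_size = 0x5000;

/* Drop entry @r by sliding the tail of the array down over it. */
static void remove_region(int r)
{
	total_size -= regions[r].size;
	memmove(&regions[r], &regions[r + 1],
		(cnt - (r + 1)) * sizeof(regions[r]));
	cnt--;
}

int main(void)
{
	remove_region(1);
	for (int i = 0; i < cnt; i++)
		printf("[%#lx-%#lx)\n", regions[i].base,
		       regions[i].base + regions[i].size);
	printf("total %#lx\n", total_size);
	return 0;
}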
196 184
197/* Defined below but needed now */
198static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);
199
200static int __init_memblock memblock_double_array(struct memblock_type *type) 185static int __init_memblock memblock_double_array(struct memblock_type *type)
201{ 186{
202 struct memblock_region *new_array, *old_array; 187 struct memblock_region *new_array, *old_array;
@@ -226,10 +211,10 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
226 */ 211 */
227 if (use_slab) { 212 if (use_slab) {
228 new_array = kmalloc(new_size, GFP_KERNEL); 213 new_array = kmalloc(new_size, GFP_KERNEL);
229 addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array); 214 addr = new_array ? __pa(new_array) : 0;
230 } else 215 } else
231 addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE); 216 addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
232 if (addr == MEMBLOCK_ERROR) { 217 if (!addr) {
233 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", 218 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
234 memblock_type_name(type), type->max, type->max * 2); 219 memblock_type_name(type), type->max, type->max * 2);
235 return -1; 220 return -1;
@@ -254,7 +239,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
254 return 0; 239 return 0;
255 240
256 /* Add the new reserved region now. Should not fail ! */ 241 /* Add the new reserved region now. Should not fail ! */
257 BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size)); 242 BUG_ON(memblock_reserve(addr, new_size));
258 243
259 /* If the array wasn't our static init one, then free it. We only do 244 /* If the array wasn't our static init one, then free it. We only do
260 * that before SLAB is available as later on, we don't know whether 245 * that before SLAB is available as later on, we don't know whether
@@ -268,343 +253,514 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
268 return 0; 253 return 0;
269} 254}
270 255
271int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, 256/**
272 phys_addr_t addr2, phys_addr_t size2) 257 * memblock_merge_regions - merge neighboring compatible regions
273{ 258 * @type: memblock type to scan
274 return 1; 259 *
275} 260 * Scan @type and merge neighboring compatible regions.
276 261 */
277static long __init_memblock memblock_add_region(struct memblock_type *type, 262static void __init_memblock memblock_merge_regions(struct memblock_type *type)
278 phys_addr_t base, phys_addr_t size)
279{ 263{
280 phys_addr_t end = base + size; 264 int i = 0;
281 int i, slot = -1;
282
283 /* First try and coalesce this MEMBLOCK with others */
284 for (i = 0; i < type->cnt; i++) {
285 struct memblock_region *rgn = &type->regions[i];
286 phys_addr_t rend = rgn->base + rgn->size;
287 265
288 /* Exit if there's no possible hits */ 266 /* cnt never goes below 1 */
289 if (rgn->base > end || rgn->size == 0) 267 while (i < type->cnt - 1) {
290 break; 268 struct memblock_region *this = &type->regions[i];
269 struct memblock_region *next = &type->regions[i + 1];
291 270
292 /* Check if we are fully enclosed within an existing 271 if (this->base + this->size != next->base ||
293 * block 272 memblock_get_region_node(this) !=
294 */ 273 memblock_get_region_node(next)) {
295 if (rgn->base <= base && rend >= end) 274 BUG_ON(this->base + this->size > next->base);
296 return 0; 275 i++;
276 continue;
277 }
297 278
298 /* Check if we overlap or are adjacent with the bottom 279 this->size += next->size;
299 * of a block. 280 memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
300 */ 281 type->cnt--;
301 if (base < rgn->base && end >= rgn->base) { 282 }
302 /* If we can't coalesce, create a new block */ 283}
303 if (!memblock_memory_can_coalesce(base, size,
304 rgn->base,
305 rgn->size)) {
306 /* Overlap & can't coalesce are mutually
307 * exclusive, if you do that, be prepared
308 * for trouble
309 */
310 WARN_ON(end != rgn->base);
311 goto new_block;
312 }
313 /* We extend the bottom of the block down to our
314 * base
315 */
316 rgn->base = base;
317 rgn->size = rend - base;
318 284
319 /* Return if we have nothing else to allocate 285/**
320 * (fully coalesced) 286 * memblock_insert_region - insert new memblock region
321 */ 287 * @type: memblock type to insert into
322 if (rend >= end) 288 * @idx: index for the insertion point
323 return 0; 289 * @base: base address of the new region
290 * @size: size of the new region
291 *
292 * Insert new memblock region [@base,@base+@size) into @type at @idx.
293 * @type must already have extra room to accomodate the new region.
294 */
295static void __init_memblock memblock_insert_region(struct memblock_type *type,
296 int idx, phys_addr_t base,
297 phys_addr_t size, int nid)
298{
299 struct memblock_region *rgn = &type->regions[idx];
324 300
325 /* We continue processing from the end of the 301 BUG_ON(type->cnt >= type->max);
326 * coalesced block. 302 memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
327 */ 303 rgn->base = base;
328 base = rend; 304 rgn->size = size;
329 size = end - base; 305 memblock_set_region_node(rgn, nid);
330 } 306 type->cnt++;
307 type->total_size += size;
308}
331 309
332 /* Now check if we overlap or are adjacent with the 310/**
333 * top of a block 311 * memblock_add_region - add new memblock region
334 */ 312 * @type: memblock type to add new region into
335 if (base <= rend && end >= rend) { 313 * @base: base address of the new region
336 /* If we can't coalesce, create a new block */ 314 * @size: size of the new region
337 if (!memblock_memory_can_coalesce(rgn->base, 315 * @nid: nid of the new region
338 rgn->size, 316 *
339 base, size)) { 317 * Add new memblock region [@base,@base+@size) into @type. The new region
340 /* Overlap & can't coalesce are mutually 318 * is allowed to overlap with existing ones - overlaps don't affect already
341 * exclusive, if you do that, be prepared 319 * existing regions. @type is guaranteed to be minimal (all neighbouring
342 * for trouble 320 * compatible regions are merged) after the addition.
343 */ 321 *
344 WARN_ON(rend != base); 322 * RETURNS:
345 goto new_block; 323 * 0 on success, -errno on failure.
346 } 324 */
347 /* We adjust our base down to enclose the 325static int __init_memblock memblock_add_region(struct memblock_type *type,
348 * original block and destroy it. It will be 326 phys_addr_t base, phys_addr_t size, int nid)
349 * part of our new allocation. Since we've 327{
350 * freed an entry, we know we won't fail 328 bool insert = false;
351 * to allocate one later, so we won't risk 329 phys_addr_t obase = base;
352 * losing the original block allocation. 330 phys_addr_t end = base + memblock_cap_size(base, &size);
353 */ 331 int i, nr_new;
354 size += (base - rgn->base);
355 base = rgn->base;
356 memblock_remove_region(type, i--);
357 }
358 }
359 332
360 /* If the array is empty, special case, replace the fake 333 /* special case for empty array */
361 * filler region and return 334 if (type->regions[0].size == 0) {
362 */ 335 WARN_ON(type->cnt != 1 || type->total_size);
363 if ((type->cnt == 1) && (type->regions[0].size == 0)) {
364 type->regions[0].base = base; 336 type->regions[0].base = base;
365 type->regions[0].size = size; 337 type->regions[0].size = size;
338 memblock_set_region_node(&type->regions[0], nid);
339 type->total_size = size;
366 return 0; 340 return 0;
367 } 341 }
368 342repeat:
369 new_block: 343 /*
370 /* If we are out of space, we fail. It's too late to resize the array 344 * The following is executed twice. Once with %false @insert and
371 * but then this shouldn't have happened in the first place. 345 * then with %true. The first counts the number of regions needed
346 * to accomodate the new area. The second actually inserts them.
372 */ 347 */
373 if (WARN_ON(type->cnt >= type->max)) 348 base = obase;
374 return -1; 349 nr_new = 0;
375 350
376 /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */ 351 for (i = 0; i < type->cnt; i++) {
377 for (i = type->cnt - 1; i >= 0; i--) { 352 struct memblock_region *rgn = &type->regions[i];
378 if (base < type->regions[i].base) { 353 phys_addr_t rbase = rgn->base;
379 type->regions[i+1].base = type->regions[i].base; 354 phys_addr_t rend = rbase + rgn->size;
380 type->regions[i+1].size = type->regions[i].size; 355
381 } else { 356 if (rbase >= end)
382 type->regions[i+1].base = base;
383 type->regions[i+1].size = size;
384 slot = i + 1;
385 break; 357 break;
358 if (rend <= base)
359 continue;
360 /*
361 * @rgn overlaps. If it separates the lower part of new
362 * area, insert that portion.
363 */
364 if (rbase > base) {
365 nr_new++;
366 if (insert)
367 memblock_insert_region(type, i++, base,
368 rbase - base, nid);
386 } 369 }
370 /* area below @rend is dealt with, forget about it */
371 base = min(rend, end);
387 } 372 }
388 if (base < type->regions[0].base) { 373
389 type->regions[0].base = base; 374 /* insert the remaining portion */
390 type->regions[0].size = size; 375 if (base < end) {
391 slot = 0; 376 nr_new++;
377 if (insert)
378 memblock_insert_region(type, i, base, end - base, nid);
392 } 379 }
393 type->cnt++;
394 380
395 /* The array is full ? Try to resize it. If that fails, we undo 381 /*
396 * our allocation and return an error 382 * If this was the first round, resize array and repeat for actual
383 * insertions; otherwise, merge and return.
397 */ 384 */
398 if (type->cnt == type->max && memblock_double_array(type)) { 385 if (!insert) {
399 BUG_ON(slot < 0); 386 while (type->cnt + nr_new > type->max)
400 memblock_remove_region(type, slot); 387 if (memblock_double_array(type) < 0)
401 return -1; 388 return -ENOMEM;
389 insert = true;
390 goto repeat;
391 } else {
392 memblock_merge_regions(type);
393 return 0;
402 } 394 }
403
404 return 0;
405} 395}
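The rewritten memblock_add_region() works in two passes: the first pass only counts how many entries the overlap splitting will need (growing the array if necessary), the second pass actually inserts them, and memblock_merge_regions() then coalesces neighbours that touch and share a node id. A user-space sketch of that final merge scan over a sorted toy array; the struct and values are illustrative, not the kernel types:

#include <stdio.h>
#include <string.h>

struct region { unsigned long base, size; int nid; };

/* Coalesce neighbours that touch and share a node id; array stays sorted. */
static int merge_regions(struct region *r, int cnt)
{
	int i = 0;

	while (i < cnt - 1) {
		if (r[i].base + r[i].size != r[i + 1].base ||
		    r[i].nid != r[i + 1].nid) {
			i++;
			continue;
		}
		r[i].size += r[i + 1].size;
		memmove(&r[i + 1], &r[i + 2],
			(cnt - (i + 2)) * sizeof(*r));
		cnt--;
	}
	return cnt;
}

int main(void)
{
	struct region r[] = {
		{ 0x0000, 0x1000, 0 }, { 0x1000, 0x1000, 0 },	/* touch, same nid  */
		{ 0x3000, 0x1000, 1 }, { 0x4000, 0x1000, 0 },	/* touch, other nid */
	};
	int cnt = merge_regions(r, 4);

	for (int i = 0; i < cnt; i++)
		printf("[%#lx-%#lx) nid %d\n", r[i].base,
		       r[i].base + r[i].size, r[i].nid);
	return 0;
}

The counting pass exists so that memblock_double_array() can be called before anything is modified, which keeps the later insertion pass infallible.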
406 396
407long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) 397int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
398 int nid)
408{ 399{
409 return memblock_add_region(&memblock.memory, base, size); 400 return memblock_add_region(&memblock.memory, base, size, nid);
401}
410 402
403int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
404{
405 return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
411} 406}
412 407
413static long __init_memblock __memblock_remove(struct memblock_type *type, 408/**
414 phys_addr_t base, phys_addr_t size) 409 * memblock_isolate_range - isolate given range into disjoint memblocks
410 * @type: memblock type to isolate range for
411 * @base: base of range to isolate
412 * @size: size of range to isolate
413 * @start_rgn: out parameter for the start of isolated region
414 * @end_rgn: out parameter for the end of isolated region
415 *
416 * Walk @type and ensure that regions don't cross the boundaries defined by
417 * [@base,@base+@size). Crossing regions are split at the boundaries,
418 * which may create at most two more regions. The index of the first
419 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
420 *
421 * RETURNS:
422 * 0 on success, -errno on failure.
423 */
424static int __init_memblock memblock_isolate_range(struct memblock_type *type,
425 phys_addr_t base, phys_addr_t size,
426 int *start_rgn, int *end_rgn)
415{ 427{
416 phys_addr_t end = base + size; 428 phys_addr_t end = base + memblock_cap_size(base, &size);
417 int i; 429 int i;
418 430
419 /* Walk through the array for collisions */ 431 *start_rgn = *end_rgn = 0;
432
433 /* we'll create at most two more regions */
434 while (type->cnt + 2 > type->max)
435 if (memblock_double_array(type) < 0)
436 return -ENOMEM;
437
420 for (i = 0; i < type->cnt; i++) { 438 for (i = 0; i < type->cnt; i++) {
421 struct memblock_region *rgn = &type->regions[i]; 439 struct memblock_region *rgn = &type->regions[i];
422 phys_addr_t rend = rgn->base + rgn->size; 440 phys_addr_t rbase = rgn->base;
441 phys_addr_t rend = rbase + rgn->size;
423 442
424 /* Nothing more to do, exit */ 443 if (rbase >= end)
425 if (rgn->base > end || rgn->size == 0)
426 break; 444 break;
427 445 if (rend <= base)
428 /* If we fully enclose the block, drop it */
429 if (base <= rgn->base && end >= rend) {
430 memblock_remove_region(type, i--);
431 continue; 446 continue;
432 }
433 447
434 /* If we are fully enclosed within a block 448 if (rbase < base) {
435 * then we need to split it and we are done 449 /*
436 */ 450 * @rgn intersects from below. Split and continue
437 if (base > rgn->base && end < rend) { 451 * to process the next region - the new top half.
438 rgn->size = base - rgn->base; 452 */
439 if (!memblock_add_region(type, end, rend - end)) 453 rgn->base = base;
440 return 0; 454 rgn->size -= base - rbase;
441 /* Failure to split is bad, we at least 455 type->total_size -= base - rbase;
442 * restore the block before erroring 456 memblock_insert_region(type, i, rbase, base - rbase,
457 memblock_get_region_node(rgn));
458 } else if (rend > end) {
459 /*
460 * @rgn intersects from above. Split and redo the
461 * current region - the new bottom half.
443 */ 462 */
444 rgn->size = rend - rgn->base;
445 WARN_ON(1);
446 return -1;
447 }
448
449 /* Check if we need to trim the bottom of a block */
450 if (rgn->base < end && rend > end) {
451 rgn->size -= end - rgn->base;
452 rgn->base = end; 463 rgn->base = end;
453 break; 464 rgn->size -= end - rbase;
465 type->total_size -= end - rbase;
466 memblock_insert_region(type, i--, rbase, end - rbase,
467 memblock_get_region_node(rgn));
468 } else {
469 /* @rgn is fully contained, record it */
470 if (!*end_rgn)
471 *start_rgn = i;
472 *end_rgn = i + 1;
454 } 473 }
474 }
455 475
456 /* And check if we need to trim the top of a block */ 476 return 0;
457 if (base < rend) 477}
458 rgn->size -= rend - base;
459 478
460 } 479static int __init_memblock __memblock_remove(struct memblock_type *type,
480 phys_addr_t base, phys_addr_t size)
481{
482 int start_rgn, end_rgn;
483 int i, ret;
484
485 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
486 if (ret)
487 return ret;
488
489 for (i = end_rgn - 1; i >= start_rgn; i--)
490 memblock_remove_region(type, i);
461 return 0; 491 return 0;
462} 492}
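__memblock_remove() is now built on memblock_isolate_range(): regions crossing [base, base+size) are split at the boundaries so the range maps onto whole array entries, which are then deleted back to front. A simplified user-space sketch of the isolation step, with a fixed-size array, no error handling and a made-up layout:

#include <stdio.h>

#define MAX_REGIONS 16

struct region { unsigned long base, size; };

static struct region r[MAX_REGIONS] = { { 0x0000, 0x4000 }, { 0x6000, 0x4000 } };
static int cnt = 2;

/* Insert a region at @idx, shifting the tail up (array assumed big enough). */
static void insert_region(int idx, unsigned long base, unsigned long size)
{
	for (int i = cnt; i > idx; i--)
		r[i] = r[i - 1];
	r[idx].base = base;
	r[idx].size = size;
	cnt++;
}

/*
 * Split regions so no entry crosses [base, end); report the index range
 * [*start_rgn, *end_rgn) of the entries that now lie fully inside it.
 */
static void isolate_range(unsigned long base, unsigned long end,
			  int *start_rgn, int *end_rgn)
{
	*start_rgn = *end_rgn = 0;

	for (int i = 0; i < cnt; i++) {
		unsigned long rbase = r[i].base;
		unsigned long rend = rbase + r[i].size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {		/* crosses the lower boundary */
			r[i].base = base;
			r[i].size = rend - base;
			insert_region(i, rbase, base - rbase);
		} else if (rend > end) {	/* crosses the upper boundary */
			r[i].base = end;
			r[i].size = rend - end;
			insert_region(i, rbase, end - rbase);
			i--;			/* revisit the new bottom half */
		} else {			/* fully inside, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}
}

int main(void)
{
	int s, e;

	isolate_range(0x2000, 0x8000, &s, &e);	/* cuts into both regions */
	printf("isolated entries [%d, %d) out of %d\n", s, e, cnt);
	for (int i = 0; i < cnt; i++)
		printf("[%#lx-%#lx)\n", r[i].base, r[i].base + r[i].size);
	return 0;
}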
463 493
464long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) 494int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
465{ 495{
466 return __memblock_remove(&memblock.memory, base, size); 496 return __memblock_remove(&memblock.memory, base, size);
467} 497}
468 498
469long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) 499int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
470{ 500{
501 memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n",
502 (unsigned long long)base,
503 (unsigned long long)base + size,
504 (void *)_RET_IP_);
505
471 return __memblock_remove(&memblock.reserved, base, size); 506 return __memblock_remove(&memblock.reserved, base, size);
472} 507}
473 508
474long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) 509int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
475{ 510{
476 struct memblock_type *_rgn = &memblock.reserved; 511 struct memblock_type *_rgn = &memblock.reserved;
477 512
513 memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
514 (unsigned long long)base,
515 (unsigned long long)base + size,
516 (void *)_RET_IP_);
478 BUG_ON(0 == size); 517 BUG_ON(0 == size);
479 518
480 return memblock_add_region(_rgn, base, size); 519 return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
481} 520}
482 521
483phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) 522/**
523 * __next_free_mem_range - next function for for_each_free_mem_range()
524 * @idx: pointer to u64 loop variable
525 * @nid: nid: node selector, %MAX_NUMNODES for all nodes
526 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
527 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
528 * @p_nid: ptr to int for nid of the range, can be %NULL
529 *
530 * Find the first free area from *@idx which matches @nid, fill the out
531 * parameters, and update *@idx for the next iteration. The lower 32bit of
532 * *@idx contains index into memory region and the upper 32bit indexes the
533 * areas before each reserved region. For example, if reserved regions
534 * look like the following,
535 *
536 * 0:[0-16), 1:[32-48), 2:[128-130)
537 *
538 * The upper 32bit indexes the following regions.
539 *
540 * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
541 *
542 * As both region arrays are sorted, the function advances the two indices
543 * in lockstep and returns each intersection.
544 */
545void __init_memblock __next_free_mem_range(u64 *idx, int nid,
546 phys_addr_t *out_start,
547 phys_addr_t *out_end, int *out_nid)
484{ 548{
485 phys_addr_t found; 549 struct memblock_type *mem = &memblock.memory;
550 struct memblock_type *rsv = &memblock.reserved;
551 int mi = *idx & 0xffffffff;
552 int ri = *idx >> 32;
486 553
487 /* We align the size to limit fragmentation. Without this, a lot of 554 for ( ; mi < mem->cnt; mi++) {
488 * small allocs quickly eat up the whole reserve array on sparc 555 struct memblock_region *m = &mem->regions[mi];
489 */ 556 phys_addr_t m_start = m->base;
490 size = memblock_align_up(size, align); 557 phys_addr_t m_end = m->base + m->size;
491 558
492 found = memblock_find_base(size, align, 0, max_addr); 559 /* only memory regions are associated with nodes, check it */
493 if (found != MEMBLOCK_ERROR && 560 if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
494 !memblock_add_region(&memblock.reserved, found, size)) 561 continue;
495 return found;
496 562
497 return 0; 563 /* scan areas before each reservation for intersection */
564 for ( ; ri < rsv->cnt + 1; ri++) {
565 struct memblock_region *r = &rsv->regions[ri];
566 phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
567 phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
568
569 /* if ri advanced past mi, break out to advance mi */
570 if (r_start >= m_end)
571 break;
572 /* if the two regions intersect, we're done */
573 if (m_start < r_end) {
574 if (out_start)
575 *out_start = max(m_start, r_start);
576 if (out_end)
577 *out_end = min(m_end, r_end);
578 if (out_nid)
579 *out_nid = memblock_get_region_node(m);
580 /*
581 * The region which ends first is advanced
582 * for the next iteration.
583 */
584 if (m_end <= r_end)
585 mi++;
586 else
587 ri++;
588 *idx = (u32)mi | (u64)ri << 32;
589 return;
590 }
591 }
592 }
593
594 /* signal end of iteration */
595 *idx = ULLONG_MAX;
498} 596}
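The kernel-doc above describes how __next_free_mem_range() packs two cursors into one u64: the low 32 bits index the memory regions and the high 32 bits index the gaps before each reserved region, with both sorted arrays walked in lockstep so every intersection is yielded exactly once. A user-space sketch of that lockstep walk over two toy interval lists; the arrays, the next_free() name and the end-of-iteration convention are simplified for illustration:

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };		/* [start, end), sorted */

/* "memory" and "reserved" stand-ins; the values are made up for illustration. */
static const struct range mem[] = { { 0, 100 }, { 200, 300 } };
static const struct range rsv[] = { { 0, 16 }, { 32, 48 }, { 128, 130 } };
#define NMEM 2
#define NRSV 3

/*
 * Yield the next free area: memory region mi intersected with the gap
 * before reserved region ri.  Both cursors are packed into one u64 the
 * way the kernel iterator does it (mi low 32 bits, ri high 32 bits).
 */
static int next_free(uint64_t *idx, uint64_t *out_start, uint64_t *out_end)
{
	uint32_t mi = *idx & 0xffffffff;
	uint32_t ri = *idx >> 32;

	for ( ; mi < NMEM; mi++) {
		uint64_t m_start = mem[mi].start;
		uint64_t m_end = mem[mi].end;

		/* scan the gaps around the reserved regions */
		for ( ; ri < NRSV + 1; ri++) {
			uint64_t r_start = ri ? rsv[ri - 1].end : 0;
			uint64_t r_end = ri < NRSV ? rsv[ri].start : UINT64_MAX;

			if (r_start >= m_end)	/* gap lies past this memory region */
				break;
			if (m_start < r_end) {	/* intersection found */
				*out_start = m_start > r_start ? m_start : r_start;
				*out_end = m_end < r_end ? m_end : r_end;
				/* advance whichever range ends first */
				if (m_end <= r_end)
					mi++;
				else
					ri++;
				*idx = (uint64_t)mi | ((uint64_t)ri << 32);
				return 1;
			}
		}
	}
	return 0;	/* done (the kernel signals this with *idx = ULLONG_MAX) */
}

int main(void)
{
	uint64_t idx = 0, s, e;

	while (next_free(&idx, &s, &e))
		printf("free [%llu-%llu)\n",
		       (unsigned long long)s, (unsigned long long)e);
	return 0;
}

Packing both cursors into a single variable is what lets for_each_free_mem_range() stay a simple single-variable for-loop macro.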
499 597
500phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) 598/**
599 * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
600 * @idx: pointer to u64 loop variable
601 * @nid: nid: node selector, %MAX_NUMNODES for all nodes
602 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
603 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
604 * @p_nid: ptr to int for nid of the range, can be %NULL
605 *
606 * Reverse of __next_free_mem_range().
607 */
608void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
609 phys_addr_t *out_start,
610 phys_addr_t *out_end, int *out_nid)
501{ 611{
502 phys_addr_t alloc; 612 struct memblock_type *mem = &memblock.memory;
613 struct memblock_type *rsv = &memblock.reserved;
614 int mi = *idx & 0xffffffff;
615 int ri = *idx >> 32;
503 616
504 alloc = __memblock_alloc_base(size, align, max_addr); 617 if (*idx == (u64)ULLONG_MAX) {
618 mi = mem->cnt - 1;
619 ri = rsv->cnt;
620 }
505 621
506 if (alloc == 0) 622 for ( ; mi >= 0; mi--) {
507 panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", 623 struct memblock_region *m = &mem->regions[mi];
508 (unsigned long long) size, (unsigned long long) max_addr); 624 phys_addr_t m_start = m->base;
625 phys_addr_t m_end = m->base + m->size;
509 626
510 return alloc; 627 /* only memory regions are associated with nodes, check it */
511} 628 if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
629 continue;
512 630
513phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align) 631 /* scan areas before each reservation for intersection */
514{ 632 for ( ; ri >= 0; ri--) {
515 return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); 633 struct memblock_region *r = &rsv->regions[ri];
516} 634 phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
635 phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
636
637 /* if ri advanced past mi, break out to advance mi */
638 if (r_end <= m_start)
639 break;
640 /* if the two regions intersect, we're done */
641 if (m_end > r_start) {
642 if (out_start)
643 *out_start = max(m_start, r_start);
644 if (out_end)
645 *out_end = min(m_end, r_end);
646 if (out_nid)
647 *out_nid = memblock_get_region_node(m);
648
649 if (m_start >= r_start)
650 mi--;
651 else
652 ri--;
653 *idx = (u32)mi | (u64)ri << 32;
654 return;
655 }
656 }
657 }
517 658
659 *idx = ULLONG_MAX;
660}
518 661
662#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
519/* 663/*
520 * Additional node-local allocators. Search for node memory is bottom up 664 * Common iterator interface used to define for_each_mem_range().
521 * and walks memblock regions within that node bottom-up as well, but allocation
522 * within an memblock region is top-down. XXX I plan to fix that at some stage
523 *
524 * WARNING: Only available after early_node_map[] has been populated,
525 * on some architectures, that is after all the calls to add_active_range()
526 * have been done to populate it.
527 */ 665 */
528 666void __init_memblock __next_mem_pfn_range(int *idx, int nid,
529phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid) 667 unsigned long *out_start_pfn,
668 unsigned long *out_end_pfn, int *out_nid)
530{ 669{
531#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 670 struct memblock_type *type = &memblock.memory;
532 /* 671 struct memblock_region *r;
533 * This code originates from sparc which really wants use to walk by addresses
534 * and returns the nid. This is not very convenient for early_pfn_map[] users
535 * as the map isn't sorted yet, and it really wants to be walked by nid.
536 *
537 * For now, I implement the inefficient method below which walks the early
538 * map multiple times. Eventually we may want to use an ARCH config option
539 * to implement a completely different method for both case.
540 */
541 unsigned long start_pfn, end_pfn;
542 int i;
543 672
544 for (i = 0; i < MAX_NUMNODES; i++) { 673 while (++*idx < type->cnt) {
545 get_pfn_range_for_nid(i, &start_pfn, &end_pfn); 674 r = &type->regions[*idx];
546 if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn)) 675
676 if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
547 continue; 677 continue;
548 *nid = i; 678 if (nid == MAX_NUMNODES || nid == r->nid)
549 return min(end, PFN_PHYS(end_pfn)); 679 break;
680 }
681 if (*idx >= type->cnt) {
682 *idx = -1;
683 return;
550 } 684 }
551#endif
552 *nid = 0;
553 685
554 return end; 686 if (out_start_pfn)
687 *out_start_pfn = PFN_UP(r->base);
688 if (out_end_pfn)
689 *out_end_pfn = PFN_DOWN(r->base + r->size);
690 if (out_nid)
691 *out_nid = r->nid;
555} 692}
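__next_mem_pfn_range() reports ranges in page frames, rounding the base up with PFN_UP() and the end down with PFN_DOWN(), so a region that does not cover at least one full page is skipped. A tiny sketch of that rounding, assuming a 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Round an address up/down to a page frame number. */
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long base = 0x1800, size = 0x2800;	/* region [0x1800, 0x4000) */
	unsigned long start_pfn = PFN_UP(base);		/* first whole page: pfn 2 */
	unsigned long end_pfn = PFN_DOWN(base + size);	/* one past the last: pfn 4 */

	if (start_pfn >= end_pfn)
		printf("no full page in the region, skipped\n");
	else
		printf("pfns [%lu-%lu)\n", start_pfn, end_pfn);
	return 0;
}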
556 693
557static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp, 694/**
558 phys_addr_t size, 695 * memblock_set_node - set node ID on memblock regions
559 phys_addr_t align, int nid) 696 * @base: base of area to set node ID for
697 * @size: size of area to set node ID for
698 * @nid: node ID to set
699 *
700 * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
701 * Regions which cross the area boundaries are split as necessary.
702 *
703 * RETURNS:
704 * 0 on success, -errno on failure.
705 */
706int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
707 int nid)
560{ 708{
561 phys_addr_t start, end; 709 struct memblock_type *type = &memblock.memory;
710 int start_rgn, end_rgn;
711 int i, ret;
562 712
563 start = mp->base; 713 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
564 end = start + mp->size; 714 if (ret)
715 return ret;
565 716
566 start = memblock_align_up(start, align); 717 for (i = start_rgn; i < end_rgn; i++)
567 while (start < end) { 718 type->regions[i].nid = nid;
568 phys_addr_t this_end;
569 int this_nid;
570 719
571 this_end = memblock_nid_range(start, end, &this_nid); 720 memblock_merge_regions(type);
572 if (this_nid == nid) { 721 return 0;
573 phys_addr_t ret = memblock_find_region(start, this_end, size, align); 722}
574 if (ret != MEMBLOCK_ERROR && 723#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
575 !memblock_add_region(&memblock.reserved, ret, size)) 724
576 return ret; 725static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
577 } 726 phys_addr_t align, phys_addr_t max_addr,
578 start = this_end; 727 int nid)
579 } 728{
729 phys_addr_t found;
580 730
581 return MEMBLOCK_ERROR; 731 found = memblock_find_in_range_node(0, max_addr, size, align, nid);
732 if (found && !memblock_reserve(found, size))
733 return found;
734
735 return 0;
582} 736}
583 737
584phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid) 738phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
585{ 739{
586 struct memblock_type *mem = &memblock.memory; 740 return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
587 int i; 741}
588 742
589 BUG_ON(0 == size); 743phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
744{
745 return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
746}
590 747
591 /* We align the size to limit fragmentation. Without this, a lot of 748phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
592 * small allocs quickly eat up the whole reserve array on sparc 749{
593 */ 750 phys_addr_t alloc;
594 size = memblock_align_up(size, align);
595 751
596 /* We do a bottom-up search for a region with the right 752 alloc = __memblock_alloc_base(size, align, max_addr);
597 * nid since that's easier considering how memblock_nid_range()
598 * works
599 */
600 for (i = 0; i < mem->cnt; i++) {
601 phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
602 size, align, nid);
603 if (ret != MEMBLOCK_ERROR)
604 return ret;
605 }
606 753
607 return 0; 754 if (alloc == 0)
755 panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
756 (unsigned long long) size, (unsigned long long) max_addr);
757
758 return alloc;
759}
760
761phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
762{
763 return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
608} 764}
609 765
610phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) 766phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
@@ -613,7 +769,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
613 769
614 if (res) 770 if (res)
615 return res; 771 return res;
616 return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE); 772 return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
617} 773}
618 774
619 775
@@ -621,10 +777,9 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
621 * Remaining API functions 777 * Remaining API functions
622 */ 778 */
623 779
624/* You must call memblock_analyze() before this. */
625phys_addr_t __init memblock_phys_mem_size(void) 780phys_addr_t __init memblock_phys_mem_size(void)
626{ 781{
627 return memblock.memory_size; 782 return memblock.memory.total_size;
628} 783}
629 784
630/* lowest address */ 785/* lowest address */
@@ -640,45 +795,28 @@ phys_addr_t __init_memblock memblock_end_of_DRAM(void)
640 return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); 795 return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
641} 796}
642 797
643/* You must call memblock_analyze() after this. */ 798void __init memblock_enforce_memory_limit(phys_addr_t limit)
644void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
645{ 799{
646 unsigned long i; 800 unsigned long i;
647 phys_addr_t limit; 801 phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
648 struct memblock_region *p;
649 802
650 if (!memory_limit) 803 if (!limit)
651 return; 804 return;
652 805
653 /* Truncate the memblock regions to satisfy the memory limit. */ 806 /* find out max address */
654 limit = memory_limit;
655 for (i = 0; i < memblock.memory.cnt; i++) { 807 for (i = 0; i < memblock.memory.cnt; i++) {
656 if (limit > memblock.memory.regions[i].size) { 808 struct memblock_region *r = &memblock.memory.regions[i];
657 limit -= memblock.memory.regions[i].size;
658 continue;
659 }
660
661 memblock.memory.regions[i].size = limit;
662 memblock.memory.cnt = i + 1;
663 break;
664 }
665
666 memory_limit = memblock_end_of_DRAM();
667 809
668 /* And truncate any reserves above the limit also. */ 810 if (limit <= r->size) {
669 for (i = 0; i < memblock.reserved.cnt; i++) { 811 max_addr = r->base + limit;
670 p = &memblock.reserved.regions[i]; 812 break;
671
672 if (p->base > memory_limit)
673 p->size = 0;
674 else if ((p->base + p->size) > memory_limit)
675 p->size = memory_limit - p->base;
676
677 if (p->size == 0) {
678 memblock_remove_region(&memblock.reserved, i);
679 i--;
680 } 813 }
814 limit -= r->size;
681 } 815 }
816
817 /* truncate both memory and reserved regions */
818 __memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
819 __memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
682} 820}
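The rewritten memblock_enforce_memory_limit() first walks the memory regions in order, spending the limit until it finds the cut-off address, then truncates both the memory and reserved arrays with one __memblock_remove() call each. A sketch of the cut-off computation only, on a made-up two-bank layout with an illustrative helper name:

#include <stdint.h>
#include <stdio.h>

struct region { uint64_t base, size; };

/* Walk regions in order, spending @limit bytes; return the cut-off address. */
static uint64_t limit_to_max_addr(const struct region *r, int cnt, uint64_t limit)
{
	uint64_t max_addr = UINT64_MAX;

	for (int i = 0; i < cnt; i++) {
		if (limit <= r[i].size) {
			max_addr = r[i].base + limit;
			break;
		}
		limit -= r[i].size;
	}
	return max_addr;
}

int main(void)
{
	/* two banks, 256 MiB and 512 MiB (made-up layout) */
	struct region mem[] = {
		{ 0x00000000, 0x10000000 },
		{ 0x40000000, 0x20000000 },
	};
	/* a 384 MiB limit ends 128 MiB into the second bank */
	uint64_t max_addr = limit_to_max_addr(mem, 2, 0x18000000);

	printf("max_addr = %#llx\n", (unsigned long long)max_addr);
	/* everything above max_addr would then be dropped from both
	 * the memory and the reserved arrays */
	return 0;
}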
683 821
684static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) 822static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
@@ -712,16 +850,18 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
712int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) 850int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
713{ 851{
714 int idx = memblock_search(&memblock.memory, base); 852 int idx = memblock_search(&memblock.memory, base);
853 phys_addr_t end = base + memblock_cap_size(base, &size);
715 854
716 if (idx == -1) 855 if (idx == -1)
717 return 0; 856 return 0;
718 return memblock.memory.regions[idx].base <= base && 857 return memblock.memory.regions[idx].base <= base &&
719 (memblock.memory.regions[idx].base + 858 (memblock.memory.regions[idx].base +
720 memblock.memory.regions[idx].size) >= (base + size); 859 memblock.memory.regions[idx].size) >= end;
721} 860}
722 861
723int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) 862int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
724{ 863{
864 memblock_cap_size(base, &size);
725 return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; 865 return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
726} 866}
727 867
@@ -731,86 +871,45 @@ void __init_memblock memblock_set_current_limit(phys_addr_t limit)
731 memblock.current_limit = limit; 871 memblock.current_limit = limit;
732} 872}
733 873
734static void __init_memblock memblock_dump(struct memblock_type *region, char *name) 874static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
735{ 875{
736 unsigned long long base, size; 876 unsigned long long base, size;
737 int i; 877 int i;
738 878
739 pr_info(" %s.cnt = 0x%lx\n", name, region->cnt); 879 pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);
740 880
741 for (i = 0; i < region->cnt; i++) { 881 for (i = 0; i < type->cnt; i++) {
742 base = region->regions[i].base; 882 struct memblock_region *rgn = &type->regions[i];
743 size = region->regions[i].size; 883 char nid_buf[32] = "";
744 884
745 pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n", 885 base = rgn->base;
746 name, i, base, base + size - 1, size); 886 size = rgn->size;
887#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
888 if (memblock_get_region_node(rgn) != MAX_NUMNODES)
889 snprintf(nid_buf, sizeof(nid_buf), " on node %d",
890 memblock_get_region_node(rgn));
891#endif
892 pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
893 name, i, base, base + size - 1, size, nid_buf);
747 } 894 }
748} 895}
749 896
750void __init_memblock memblock_dump_all(void) 897void __init_memblock __memblock_dump_all(void)
751{ 898{
752 if (!memblock_debug)
753 return;
754
755 pr_info("MEMBLOCK configuration:\n"); 899 pr_info("MEMBLOCK configuration:\n");
756 pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size); 900 pr_info(" memory size = %#llx reserved size = %#llx\n",
901 (unsigned long long)memblock.memory.total_size,
902 (unsigned long long)memblock.reserved.total_size);
757 903
758 memblock_dump(&memblock.memory, "memory"); 904 memblock_dump(&memblock.memory, "memory");
759 memblock_dump(&memblock.reserved, "reserved"); 905 memblock_dump(&memblock.reserved, "reserved");
760} 906}
761 907
762void __init memblock_analyze(void) 908void __init memblock_allow_resize(void)
763{ 909{
764 int i;
765
766 /* Check marker in the unused last array entry */
767 WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
768 != MEMBLOCK_INACTIVE);
769 WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
770 != MEMBLOCK_INACTIVE);
771
772 memblock.memory_size = 0;
773
774 for (i = 0; i < memblock.memory.cnt; i++)
775 memblock.memory_size += memblock.memory.regions[i].size;
776
777 /* We allow resizing from there */
778 memblock_can_resize = 1; 910 memblock_can_resize = 1;
779} 911}
780 912
781void __init memblock_init(void)
782{
783 static int init_done __initdata = 0;
784
785 if (init_done)
786 return;
787 init_done = 1;
788
789 /* Hookup the initial arrays */
790 memblock.memory.regions = memblock_memory_init_regions;
791 memblock.memory.max = INIT_MEMBLOCK_REGIONS;
792 memblock.reserved.regions = memblock_reserved_init_regions;
793 memblock.reserved.max = INIT_MEMBLOCK_REGIONS;
794
795 /* Write a marker in the unused last array entry */
796 memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;
797 memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;
798
799 /* Create a dummy zero size MEMBLOCK which will get coalesced away later.
800 * This simplifies the memblock_add() code below...
801 */
802 memblock.memory.regions[0].base = 0;
803 memblock.memory.regions[0].size = 0;
804 memblock.memory.cnt = 1;
805
806 /* Ditto. */
807 memblock.reserved.regions[0].base = 0;
808 memblock.reserved.regions[0].size = 0;
809 memblock.reserved.cnt = 1;
810
811 memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
812}
813
814static int __init early_memblock(char *p) 913static int __init early_memblock(char *p)
815{ 914{
816 if (p && strstr(p, "debug")) 915 if (p && strstr(p, "debug"))
@@ -819,7 +918,7 @@ static int __init early_memblock(char *p)
819} 918}
820early_param("memblock", early_memblock); 919early_param("memblock", early_memblock);
821 920
822#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK) 921#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
823 922
824static int memblock_debug_show(struct seq_file *m, void *private) 923static int memblock_debug_show(struct seq_file *m, void *private)
825{ 924{