path: root/mm/memblock.c
Diffstat (limited to 'mm/memblock.c')
-rw-r--r--  mm/memblock.c  944
1 file changed, 633 insertions, 311 deletions
diff --git a/mm/memblock.c b/mm/memblock.c
index 43840b305ecb..a0562d1a6ad4 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -11,446 +11,634 @@
11 */ 11 */
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/slab.h>
14#include <linux/init.h> 15#include <linux/init.h>
15#include <linux/bitops.h> 16#include <linux/bitops.h>
17#include <linux/poison.h>
18#include <linux/pfn.h>
19#include <linux/debugfs.h>
20#include <linux/seq_file.h>
16#include <linux/memblock.h> 21#include <linux/memblock.h>
17 22
18#define MEMBLOCK_ALLOC_ANYWHERE 0 23struct memblock memblock __initdata_memblock;
19 24
20struct memblock memblock; 25int memblock_debug __initdata_memblock;
26int memblock_can_resize __initdata_memblock;
27static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
28static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
21 29
22static int memblock_debug; 30/* inline so we don't get a warning when pr_debug is compiled out */
31static inline const char *memblock_type_name(struct memblock_type *type)
32{
33 if (type == &memblock.memory)
34 return "memory";
35 else if (type == &memblock.reserved)
36 return "reserved";
37 else
38 return "unknown";
39}
23 40
24static int __init early_memblock(char *p) 41/*
42 * Address comparison utilities
43 */
44
45static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size)
25{ 46{
26 if (p && strstr(p, "debug")) 47 return addr & ~(size - 1);
27 memblock_debug = 1;
28 return 0;
29} 48}
30early_param("memblock", early_memblock);
31 49
32static void memblock_dump(struct memblock_region *region, char *name) 50static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size)
33{ 51{
34 unsigned long long base, size; 52 return (addr + (size - 1)) & ~(size - 1);
35 int i; 53}
36 54
37 pr_info(" %s.cnt = 0x%lx\n", name, region->cnt); 55static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
56 phys_addr_t base2, phys_addr_t size2)
57{
58 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
59}
38 60
39 for (i = 0; i < region->cnt; i++) { 61long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
40 base = region->region[i].base; 62{
41 size = region->region[i].size; 63 unsigned long i;
42 64
43 pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n", 65 for (i = 0; i < type->cnt; i++) {
44 name, i, base, base + size - 1, size); 66 phys_addr_t rgnbase = type->regions[i].base;
67 phys_addr_t rgnsize = type->regions[i].size;
68 if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
69 break;
45 } 70 }
71
72 return (i < type->cnt) ? i : -1;
46} 73}
47 74
48void memblock_dump_all(void) 75/*
76 * Find, allocate, deallocate or reserve unreserved regions. All allocations
77 * are top-down.
78 */
79
80static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
81 phys_addr_t size, phys_addr_t align)
49{ 82{
50 if (!memblock_debug) 83 phys_addr_t base, res_base;
51 return; 84 long j;
52 85
53 pr_info("MEMBLOCK configuration:\n"); 86 /* In case, huge size is requested */
54 pr_info(" rmo_size = 0x%llx\n", (unsigned long long)memblock.rmo_size); 87 if (end < size)
55 pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size); 88 return MEMBLOCK_ERROR;
56 89
57 memblock_dump(&memblock.memory, "memory"); 90 base = memblock_align_down((end - size), align);
58 memblock_dump(&memblock.reserved, "reserved"); 91
92 /* Prevent allocations returning 0 as it's also used to
93 * indicate an allocation failure
94 */
95 if (start == 0)
96 start = PAGE_SIZE;
97
98 while (start <= base) {
99 j = memblock_overlaps_region(&memblock.reserved, base, size);
100 if (j < 0)
101 return base;
102 res_base = memblock.reserved.regions[j].base;
103 if (res_base < size)
104 break;
105 base = memblock_align_down(res_base - size, align);
106 }
107
108 return MEMBLOCK_ERROR;
59} 109}
60 110
61static unsigned long memblock_addrs_overlap(u64 base1, u64 size1, u64 base2, 111static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
62 u64 size2) 112 phys_addr_t align, phys_addr_t start, phys_addr_t end)
63{ 113{
64 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); 114 long i;
115
116 BUG_ON(0 == size);
117
118 /* Pump up max_addr */
119 if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
120 end = memblock.current_limit;
121
122 /* We do a top-down search, this tends to limit memory
123 * fragmentation by keeping early boot allocs near the
124 * top of memory
125 */
126 for (i = memblock.memory.cnt - 1; i >= 0; i--) {
127 phys_addr_t memblockbase = memblock.memory.regions[i].base;
128 phys_addr_t memblocksize = memblock.memory.regions[i].size;
129 phys_addr_t bottom, top, found;
130
131 if (memblocksize < size)
132 continue;
133 if ((memblockbase + memblocksize) <= start)
134 break;
135 bottom = max(memblockbase, start);
136 top = min(memblockbase + memblocksize, end);
137 if (bottom >= top)
138 continue;
139 found = memblock_find_region(bottom, top, size, align);
140 if (found != MEMBLOCK_ERROR)
141 return found;
142 }
143 return MEMBLOCK_ERROR;
65} 144}
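
/*
 * Editorial sketch, not part of this patch: a simplified userspace model of
 * the top-down search done by memblock_find_region()/memblock_find_base()
 * above.  Alignments are assumed to be powers of two and 0 doubles as the
 * failure value, as in the patch; the names used here (struct range,
 * find_free_top_down) are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

struct range { uint64_t base, size; };

static uint64_t align_down(uint64_t addr, uint64_t align)
{
        return addr & ~(align - 1);
}

static int overlaps(uint64_t b1, uint64_t s1, uint64_t b2, uint64_t s2)
{
        return b1 < b2 + s2 && b2 < b1 + s1;
}

/*
 * Start at the highest aligned base that fits in [start, end) and, each time
 * the candidate collides with a reserved range, retry just below that range.
 */
static uint64_t find_free_top_down(uint64_t start, uint64_t end,
                                   uint64_t size, uint64_t align,
                                   const struct range *resv, int nresv)
{
        uint64_t base;
        int i;

        if (end < size)
                return 0;
        if (start == 0)
                start = 1;      /* keep 0 free to mean "not found" */
        base = align_down(end - size, align);
        while (base >= start) {
                const struct range *clash = NULL;

                for (i = 0; i < nresv; i++) {
                        if (overlaps(base, size, resv[i].base, resv[i].size)) {
                                clash = &resv[i];
                                break;
                        }
                }
                if (!clash)
                        return base;
                if (clash->base < size)
                        break;
                base = align_down(clash->base - size, align);
        }
        return 0;
}

int main(void)
{
        struct range reserved[] = { { 0xe000, 0x2000 } };

        /* The top 8KiB of [0x1000, 0x10000) are reserved, so this prints 0xc000. */
        printf("%#llx\n", (unsigned long long)
               find_free_top_down(0x1000, 0x10000, 0x2000, 0x1000, reserved, 1));
        return 0;
}
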
66 145
67static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2) 146/*
147 * Find a free area with specified alignment in a specific range.
148 */
149u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align)
68{ 150{
69 if (base2 == base1 + size1) 151 return memblock_find_base(size, align, start, end);
70 return 1; 152}
71 else if (base1 == base2 + size2)
72 return -1;
73 153
74 return 0; 154/*
155 * Free memblock.reserved.regions
156 */
157int __init_memblock memblock_free_reserved_regions(void)
158{
159 if (memblock.reserved.regions == memblock_reserved_init_regions)
160 return 0;
161
162 return memblock_free(__pa(memblock.reserved.regions),
163 sizeof(struct memblock_region) * memblock.reserved.max);
75} 164}
76 165
77static long memblock_regions_adjacent(struct memblock_region *rgn, 166/*
78 unsigned long r1, unsigned long r2) 167 * Reserve memblock.reserved.regions
168 */
169int __init_memblock memblock_reserve_reserved_regions(void)
79{ 170{
80 u64 base1 = rgn->region[r1].base; 171 if (memblock.reserved.regions == memblock_reserved_init_regions)
81 u64 size1 = rgn->region[r1].size; 172 return 0;
82 u64 base2 = rgn->region[r2].base;
83 u64 size2 = rgn->region[r2].size;
84 173
85 return memblock_addrs_adjacent(base1, size1, base2, size2); 174 return memblock_reserve(__pa(memblock.reserved.regions),
175 sizeof(struct memblock_region) * memblock.reserved.max);
86} 176}
87 177
88static void memblock_remove_region(struct memblock_region *rgn, unsigned long r) 178static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
89{ 179{
90 unsigned long i; 180 unsigned long i;
91 181
92 for (i = r; i < rgn->cnt - 1; i++) { 182 for (i = r; i < type->cnt - 1; i++) {
93 rgn->region[i].base = rgn->region[i + 1].base; 183 type->regions[i].base = type->regions[i + 1].base;
94 rgn->region[i].size = rgn->region[i + 1].size; 184 type->regions[i].size = type->regions[i + 1].size;
95 } 185 }
96 rgn->cnt--; 186 type->cnt--;
97}
98 187
99/* Assumption: base addr of region 1 < base addr of region 2 */ 188 /* Special case for empty arrays */
100static void memblock_coalesce_regions(struct memblock_region *rgn, 189 if (type->cnt == 0) {
101 unsigned long r1, unsigned long r2) 190 type->cnt = 1;
102{ 191 type->regions[0].base = 0;
103 rgn->region[r1].size += rgn->region[r2].size; 192 type->regions[0].size = 0;
104 memblock_remove_region(rgn, r2); 193 }
105} 194}
106 195
107void __init memblock_init(void) 196/* Defined below but needed now */
197static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);
198
199static int __init_memblock memblock_double_array(struct memblock_type *type)
108{ 200{
109 /* Create a dummy zero size MEMBLOCK which will get coalesced away later. 201 struct memblock_region *new_array, *old_array;
110 * This simplifies the memblock_add() code below... 202 phys_addr_t old_size, new_size, addr;
203 int use_slab = slab_is_available();
204
205 /* We don't allow resizing until we know about the reserved regions
206 * of memory that aren't suitable for allocation
111 */ 207 */
112 memblock.memory.region[0].base = 0; 208 if (!memblock_can_resize)
113 memblock.memory.region[0].size = 0; 209 return -1;
114 memblock.memory.cnt = 1;
115 210
116 /* Ditto. */ 211 /* Calculate new doubled size */
117 memblock.reserved.region[0].base = 0; 212 old_size = type->max * sizeof(struct memblock_region);
118 memblock.reserved.region[0].size = 0; 213 new_size = old_size << 1;
119 memblock.reserved.cnt = 1; 214
120} 215 /* Try to find some space for it.
216 *
217 * WARNING: We assume that either slab_is_available() and we use it or
218 * we use MEMBLOCK for allocations. That means that this is unsafe to use
219 * when bootmem is currently active (unless bootmem itself is implemented
220 * on top of MEMBLOCK which isn't the case yet)
221 *
222 * This should however not be an issue for now, as we currently only
223 * call into MEMBLOCK while it's still active, or much later when slab is
224 * active for memory hotplug operations
225 */
226 if (use_slab) {
227 new_array = kmalloc(new_size, GFP_KERNEL);
228 addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
229 } else
230 addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
231 if (addr == MEMBLOCK_ERROR) {
232 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
233 memblock_type_name(type), type->max, type->max * 2);
234 return -1;
235 }
236 new_array = __va(addr);
121 237
122void __init memblock_analyze(void) 238 memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
123{ 239 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);
124 int i;
125 240
126 memblock.memory.size = 0; 241 /* Found space, we now need to move the array over before
242 * we add the reserved region since it may be our reserved
243 * array itself that is full.
244 */
245 memcpy(new_array, type->regions, old_size);
246 memset(new_array + type->max, 0, old_size);
247 old_array = type->regions;
248 type->regions = new_array;
249 type->max <<= 1;
250
251 /* If we use SLAB that's it, we are done */
252 if (use_slab)
253 return 0;
127 254
128 for (i = 0; i < memblock.memory.cnt; i++) 255 /* Add the new reserved region now. Should not fail ! */
129 memblock.memory.size += memblock.memory.region[i].size; 256 BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));
257
258 /* If the array wasn't our static init one, then free it. We only do
259 * that before SLAB is available as later on, we don't know whether
260 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
261 * anyways
262 */
263 if (old_array != memblock_memory_init_regions &&
264 old_array != memblock_reserved_init_regions)
265 memblock_free(__pa(old_array), old_size);
266
267 return 0;
268}
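
/*
 * Editorial sketch, not part of this patch: the array-doubling step in plain
 * userspace terms.  Here the regions array is heap-allocated, so ordinary
 * malloc/free handling is enough; the kernel code above instead has to take
 * the new buffer from slab or from memblock itself, reserve it, and must not
 * free the original static __initdata arrays.  The names below (struct
 * region_type, double_regions) are hypothetical.
 */
#include <stdlib.h>
#include <string.h>

struct region { unsigned long long base, size; };

struct region_type {
        struct region *regions;         /* heap-allocated in this sketch */
        unsigned long cnt, max;
};

static int double_regions(struct region_type *type)
{
        size_t old_size = type->max * sizeof(*type->regions);
        struct region *new_array = malloc(old_size * 2);

        if (!new_array)
                return -1;
        /* Copy the live entries over and zero the new half, as the patch does. */
        memcpy(new_array, type->regions, old_size);
        memset((char *)new_array + old_size, 0, old_size);
        free(type->regions);
        type->regions = new_array;
        type->max *= 2;
        return 0;
}

int main(void)
{
        struct region_type t = { .cnt = 0, .max = 4 };

        t.regions = calloc(t.max, sizeof(*t.regions));
        return t.regions && double_regions(&t) == 0 && t.max == 8 ? 0 : 1;
}
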
269
270extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
271 phys_addr_t addr2, phys_addr_t size2)
272{
273 return 1;
130} 274}
131 275
132static long memblock_add_region(struct memblock_region *rgn, u64 base, u64 size) 276static long __init_memblock memblock_add_region(struct memblock_type *type,
277 phys_addr_t base, phys_addr_t size)
133{ 278{
134 unsigned long coalesced = 0; 279 phys_addr_t end = base + size;
135 long adjacent, i; 280 int i, slot = -1;
136 281
137 if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) { 282 /* First try and coalesce this MEMBLOCK with others */
138 rgn->region[0].base = base; 283 for (i = 0; i < type->cnt; i++) {
139 rgn->region[0].size = size; 284 struct memblock_region *rgn = &type->regions[i];
140 return 0; 285 phys_addr_t rend = rgn->base + rgn->size;
141 }
142 286
143 /* First try and coalesce this MEMBLOCK with another. */ 287 /* Exit if there's no possible hits */
144 for (i = 0; i < rgn->cnt; i++) { 288 if (rgn->base > end || rgn->size == 0)
145 u64 rgnbase = rgn->region[i].base; 289 break;
146 u64 rgnsize = rgn->region[i].size;
147 290
148 if ((rgnbase == base) && (rgnsize == size)) 291 /* Check if we are fully enclosed within an existing
149 /* Already have this region, so we're done */ 292 * block
293 */
294 if (rgn->base <= base && rend >= end)
150 return 0; 295 return 0;
151 296
152 adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize); 297 /* Check if we overlap or are adjacent with the bottom
153 if (adjacent > 0) { 298 * of a block.
154 rgn->region[i].base -= size; 299 */
155 rgn->region[i].size += size; 300 if (base < rgn->base && end >= rgn->base) {
156 coalesced++; 301 /* If we can't coalesce, create a new block */
157 break; 302 if (!memblock_memory_can_coalesce(base, size,
158 } else if (adjacent < 0) { 303 rgn->base,
159 rgn->region[i].size += size; 304 rgn->size)) {
160 coalesced++; 305 /* Overlap & can't coalesce are mutually
161 break; 306 * exclusive, if you do that, be prepared
307 * for trouble
308 */
309 WARN_ON(end != rgn->base);
310 goto new_block;
311 }
312 /* We extend the bottom of the block down to our
313 * base
314 */
315 rgn->base = base;
316 rgn->size = rend - base;
317
318 /* Return if we have nothing else to allocate
319 * (fully coalesced)
320 */
321 if (rend >= end)
322 return 0;
323
324 /* We continue processing from the end of the
325 * coalesced block.
326 */
327 base = rend;
328 size = end - base;
329 }
330
331 /* Now check if we overlap or are adjacent with the
332 * top of a block
333 */
334 if (base <= rend && end >= rend) {
335 /* If we can't coalesce, create a new block */
336 if (!memblock_memory_can_coalesce(rgn->base,
337 rgn->size,
338 base, size)) {
339 /* Overlap & can't coalesce are mutually
340 * exclusive, if you do that, be prepared
341 * for trouble
342 */
343 WARN_ON(rend != base);
344 goto new_block;
345 }
346 /* We adjust our base down to enclose the
347 * original block and destroy it. It will be
348 * part of our new allocation. Since we've
349 * freed an entry, we know we won't fail
350 * to allocate one later, so we won't risk
351 * losing the original block allocation.
352 */
353 size += (base - rgn->base);
354 base = rgn->base;
355 memblock_remove_region(type, i--);
162 } 356 }
163 } 357 }
164 358
165 if ((i < rgn->cnt - 1) && memblock_regions_adjacent(rgn, i, i+1)) { 359 /* If the array is empty, special case, replace the fake
166 memblock_coalesce_regions(rgn, i, i+1); 360 * filler region and return
167 coalesced++; 361 */
362 if ((type->cnt == 1) && (type->regions[0].size == 0)) {
363 type->regions[0].base = base;
364 type->regions[0].size = size;
365 return 0;
168 } 366 }
169 367
170 if (coalesced) 368 new_block:
171 return coalesced; 369 /* If we are out of space, we fail. It's too late to resize the array
172 if (rgn->cnt >= MAX_MEMBLOCK_REGIONS) 370 * but then this shouldn't have happened in the first place.
371 */
372 if (WARN_ON(type->cnt >= type->max))
173 return -1; 373 return -1;
174 374
175 /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */ 375 /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
176 for (i = rgn->cnt - 1; i >= 0; i--) { 376 for (i = type->cnt - 1; i >= 0; i--) {
177 if (base < rgn->region[i].base) { 377 if (base < type->regions[i].base) {
178 rgn->region[i+1].base = rgn->region[i].base; 378 type->regions[i+1].base = type->regions[i].base;
179 rgn->region[i+1].size = rgn->region[i].size; 379 type->regions[i+1].size = type->regions[i].size;
180 } else { 380 } else {
181 rgn->region[i+1].base = base; 381 type->regions[i+1].base = base;
182 rgn->region[i+1].size = size; 382 type->regions[i+1].size = size;
383 slot = i + 1;
183 break; 384 break;
184 } 385 }
185 } 386 }
387 if (base < type->regions[0].base) {
388 type->regions[0].base = base;
389 type->regions[0].size = size;
390 slot = 0;
391 }
392 type->cnt++;
186 393
187 if (base < rgn->region[0].base) { 394 /* The array is full ? Try to resize it. If that fails, we undo
188 rgn->region[0].base = base; 395 * our allocation and return an error
189 rgn->region[0].size = size; 396 */
397 if (type->cnt == type->max && memblock_double_array(type)) {
398 BUG_ON(slot < 0);
399 memblock_remove_region(type, slot);
400 return -1;
190 } 401 }
191 rgn->cnt++;
192 402
193 return 0; 403 return 0;
194} 404}
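
/*
 * Editorial sketch, not part of this patch: a compact model of what
 * memblock_add_region() above achieves - inserting [base, base+size) into a
 * sorted array of non-overlapping regions and merging any regions it overlaps
 * or touches.  The patch's open-coded version is longer because it must also
 * honour memblock_memory_can_coalesce() and grow a full array in place.  The
 * names here (struct region_set, add_range) are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

struct region { unsigned long long base, size; };

struct region_set {
        struct region r[16];
        int cnt;
};

static void add_range(struct region_set *s, unsigned long long base,
                      unsigned long long size)
{
        unsigned long long end = base + size;
        int i = 0, j;

        /* Swallow every existing region that overlaps or is adjacent. */
        while (i < s->cnt) {
                unsigned long long rend = s->r[i].base + s->r[i].size;

                if (rend < base || s->r[i].base > end) {
                        i++;
                        continue;
                }
                if (s->r[i].base < base)
                        base = s->r[i].base;
                if (rend > end)
                        end = rend;
                for (j = i; j < s->cnt - 1; j++)
                        s->r[j] = s->r[j + 1];
                s->cnt--;
        }
        /* Insert the merged range at its sorted position. */
        assert(s->cnt < 16);
        for (i = s->cnt; i > 0 && s->r[i - 1].base > base; i--)
                s->r[i] = s->r[i - 1];
        s->r[i].base = base;
        s->r[i].size = end - base;
        s->cnt++;
}

int main(void)
{
        struct region_set s = { .cnt = 0 };
        int i;

        add_range(&s, 0x1000, 0x1000);
        add_range(&s, 0x3000, 0x1000);
        add_range(&s, 0x2000, 0x1000);  /* bridges the two regions above */
        for (i = 0; i < s.cnt; i++)     /* prints a single merged region */
                printf("[%#llx-%#llx]\n", s.r[i].base,
                       s.r[i].base + s.r[i].size - 1);
        return 0;
}
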
195 405
196long memblock_add(u64 base, u64 size) 406long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
197{ 407{
198 struct memblock_region *_rgn = &memblock.memory; 408 return memblock_add_region(&memblock.memory, base, size);
199
200 /* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */
201 if (base == 0)
202 memblock.rmo_size = size;
203
204 return memblock_add_region(_rgn, base, size);
205 409
206} 410}
207 411
208static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size) 412static long __init_memblock __memblock_remove(struct memblock_type *type,
413 phys_addr_t base, phys_addr_t size)
209{ 414{
210 u64 rgnbegin, rgnend; 415 phys_addr_t end = base + size;
211 u64 end = base + size;
212 int i; 416 int i;
213 417
214 rgnbegin = rgnend = 0; /* supress gcc warnings */ 418 /* Walk through the array for collisions */
215 419 for (i = 0; i < type->cnt; i++) {
216 /* Find the region where (base, size) belongs to */ 420 struct memblock_region *rgn = &type->regions[i];
217 for (i=0; i < rgn->cnt; i++) { 421 phys_addr_t rend = rgn->base + rgn->size;
218 rgnbegin = rgn->region[i].base;
219 rgnend = rgnbegin + rgn->region[i].size;
220 422
221 if ((rgnbegin <= base) && (end <= rgnend)) 423 /* Nothing more to do, exit */
424 if (rgn->base > end || rgn->size == 0)
222 break; 425 break;
223 }
224 426
225 /* Didn't find the region */ 427 /* If we fully enclose the block, drop it */
226 if (i == rgn->cnt) 428 if (base <= rgn->base && end >= rend) {
227 return -1; 429 memblock_remove_region(type, i--);
430 continue;
431 }
228 432
229 /* Check to see if we are removing entire region */ 433 /* If we are fully enclosed within a block
230 if ((rgnbegin == base) && (rgnend == end)) { 434 * then we need to split it and we are done
231 memblock_remove_region(rgn, i); 435 */
232 return 0; 436 if (base > rgn->base && end < rend) {
233 } 437 rgn->size = base - rgn->base;
438 if (!memblock_add_region(type, end, rend - end))
439 return 0;
440 /* Failure to split is bad, we at least
441 * restore the block before erroring
442 */
443 rgn->size = rend - rgn->base;
444 WARN_ON(1);
445 return -1;
446 }
234 447
235 /* Check to see if region is matching at the front */ 448 /* Check if we need to trim the bottom of a block */
236 if (rgnbegin == base) { 449 if (rgn->base < end && rend > end) {
237 rgn->region[i].base = end; 450 rgn->size -= end - rgn->base;
238 rgn->region[i].size -= size; 451 rgn->base = end;
239 return 0; 452 break;
240 } 453 }
241 454
242 /* Check to see if the region is matching at the end */ 455 /* And check if we need to trim the top of a block */
243 if (rgnend == end) { 456 if (base < rend)
244 rgn->region[i].size -= size; 457 rgn->size -= rend - base;
245 return 0;
246 }
247 458
248 /* 459 }
249 * We need to split the entry - adjust the current one to the 460 return 0;
250 * beginging of the hole and add the region after hole.
251 */
252 rgn->region[i].size = base - rgn->region[i].base;
253 return memblock_add_region(rgn, end, rgnend - end);
254} 461}
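
/*
 * Editorial sketch, not part of this patch: a userspace model of the removal
 * logic above - each region overlapping [base, base+size) is dropped,
 * trimmed, or split in two so that the range is no longer covered.  The
 * names (struct region_set, remove_range) are hypothetical, and unlike the
 * kernel version a failed split is simply ruled out by the assert below
 * rather than rolled back.
 */
#include <assert.h>

struct region { unsigned long long base, size; };

struct region_set {
        struct region r[16];
        int cnt;
};

static void remove_range(struct region_set *s, unsigned long long base,
                         unsigned long long size)
{
        unsigned long long end = base + size;
        int i = 0, j;

        while (i < s->cnt) {
                unsigned long long rbase = s->r[i].base;
                unsigned long long rend = rbase + s->r[i].size;

                if (rend <= base || rbase >= end) {             /* no overlap */
                        i++;
                } else if (base <= rbase && end >= rend) {      /* fully covered: drop */
                        for (j = i; j < s->cnt - 1; j++)
                                s->r[j] = s->r[j + 1];
                        s->cnt--;
                } else if (base > rbase && end < rend) {        /* hole in the middle: split */
                        assert(s->cnt < 16);
                        for (j = s->cnt; j > i + 1; j--)
                                s->r[j] = s->r[j - 1];
                        s->r[i].size = base - rbase;
                        s->r[i + 1].base = end;
                        s->r[i + 1].size = rend - end;
                        s->cnt++;
                        i += 2;
                } else if (base <= rbase) {                     /* trim the bottom */
                        s->r[i].base = end;
                        s->r[i].size = rend - end;
                        i++;
                } else {                                        /* trim the top */
                        s->r[i].size = base - rbase;
                        i++;
                }
        }
}

int main(void)
{
        struct region_set s = { { { 0x0, 0x10000 } }, 1 };

        remove_range(&s, 0x4000, 0x1000);       /* punch a hole: two regions remain */
        return s.cnt == 2 ? 0 : 1;
}
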
255 462
256long memblock_remove(u64 base, u64 size) 463long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
257{ 464{
258 return __memblock_remove(&memblock.memory, base, size); 465 return __memblock_remove(&memblock.memory, base, size);
259} 466}
260 467
261long __init memblock_free(u64 base, u64 size) 468long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
262{ 469{
263 return __memblock_remove(&memblock.reserved, base, size); 470 return __memblock_remove(&memblock.reserved, base, size);
264} 471}
265 472
266long __init memblock_reserve(u64 base, u64 size) 473long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
267{ 474{
268 struct memblock_region *_rgn = &memblock.reserved; 475 struct memblock_type *_rgn = &memblock.reserved;
269 476
270 BUG_ON(0 == size); 477 BUG_ON(0 == size);
271 478
272 return memblock_add_region(_rgn, base, size); 479 return memblock_add_region(_rgn, base, size);
273} 480}
274 481
275long memblock_overlaps_region(struct memblock_region *rgn, u64 base, u64 size) 482phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
276{ 483{
277 unsigned long i; 484 phys_addr_t found;
278 485
279 for (i = 0; i < rgn->cnt; i++) { 486 /* We align the size to limit fragmentation. Without this, a lot of
280 u64 rgnbase = rgn->region[i].base; 487 * small allocs quickly eat up the whole reserve array on sparc
281 u64 rgnsize = rgn->region[i].size; 488 */
282 if (memblock_addrs_overlap(base, size, rgnbase, rgnsize)) 489 size = memblock_align_up(size, align);
283 break;
284 }
285 490
286 return (i < rgn->cnt) ? i : -1; 491 found = memblock_find_base(size, align, 0, max_addr);
492 if (found != MEMBLOCK_ERROR &&
493 !memblock_add_region(&memblock.reserved, found, size))
494 return found;
495
496 return 0;
287} 497}
288 498
289static u64 memblock_align_down(u64 addr, u64 size) 499phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
290{ 500{
291 return addr & ~(size - 1); 501 phys_addr_t alloc;
502
503 alloc = __memblock_alloc_base(size, align, max_addr);
504
505 if (alloc == 0)
506 panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
507 (unsigned long long) size, (unsigned long long) max_addr);
508
509 return alloc;
292} 510}
293 511
294static u64 memblock_align_up(u64 addr, u64 size) 512phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
295{ 513{
296 return (addr + (size - 1)) & ~(size - 1); 514 return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
297} 515}
298 516
299static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end, 517
300 u64 size, u64 align) 518/*
519 * Additional node-local allocators. Search for node memory is bottom up
520 * and walks memblock regions within that node bottom-up as well, but allocation
 521 * within a memblock region is top-down. XXX I plan to fix that at some stage
522 *
523 * WARNING: Only available after early_node_map[] has been populated,
524 * on some architectures, that is after all the calls to add_active_range()
525 * have been done to populate it.
526 */
527
528phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
301{ 529{
302 u64 base, res_base; 530#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
303 long j; 531 /*
 532 * This code originates from sparc which really wants us to walk by addresses
533 * and returns the nid. This is not very convenient for early_pfn_map[] users
534 * as the map isn't sorted yet, and it really wants to be walked by nid.
535 *
536 * For now, I implement the inefficient method below which walks the early
537 * map multiple times. Eventually we may want to use an ARCH config option
 538 * to implement a completely different method for both cases.
539 */
540 unsigned long start_pfn, end_pfn;
541 int i;
304 542
305 base = memblock_align_down((end - size), align); 543 for (i = 0; i < MAX_NUMNODES; i++) {
306 while (start <= base) { 544 get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
307 j = memblock_overlaps_region(&memblock.reserved, base, size); 545 if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
308 if (j < 0) { 546 continue;
309 /* this area isn't reserved, take it */ 547 *nid = i;
310 if (memblock_add_region(&memblock.reserved, base, size) < 0) 548 return min(end, PFN_PHYS(end_pfn));
311 base = ~(u64)0;
312 return base;
313 }
314 res_base = memblock.reserved.region[j].base;
315 if (res_base < size)
316 break;
317 base = memblock_align_down(res_base - size, align);
318 } 549 }
550#endif
551 *nid = 0;
319 552
320 return ~(u64)0; 553 return end;
321} 554}
322 555
323static u64 __init memblock_alloc_nid_region(struct memblock_property *mp, 556static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
324 u64 (*nid_range)(u64, u64, int *), 557 phys_addr_t size,
325 u64 size, u64 align, int nid) 558 phys_addr_t align, int nid)
326{ 559{
327 u64 start, end; 560 phys_addr_t start, end;
328 561
329 start = mp->base; 562 start = mp->base;
330 end = start + mp->size; 563 end = start + mp->size;
331 564
332 start = memblock_align_up(start, align); 565 start = memblock_align_up(start, align);
333 while (start < end) { 566 while (start < end) {
334 u64 this_end; 567 phys_addr_t this_end;
335 int this_nid; 568 int this_nid;
336 569
337 this_end = nid_range(start, end, &this_nid); 570 this_end = memblock_nid_range(start, end, &this_nid);
338 if (this_nid == nid) { 571 if (this_nid == nid) {
339 u64 ret = memblock_alloc_nid_unreserved(start, this_end, 572 phys_addr_t ret = memblock_find_region(start, this_end, size, align);
340 size, align); 573 if (ret != MEMBLOCK_ERROR &&
341 if (ret != ~(u64)0) 574 !memblock_add_region(&memblock.reserved, ret, size))
342 return ret; 575 return ret;
343 } 576 }
344 start = this_end; 577 start = this_end;
345 } 578 }
346 579
347 return ~(u64)0; 580 return MEMBLOCK_ERROR;
348} 581}
349 582
350u64 __init memblock_alloc_nid(u64 size, u64 align, int nid, 583phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
351 u64 (*nid_range)(u64 start, u64 end, int *nid))
352{ 584{
353 struct memblock_region *mem = &memblock.memory; 585 struct memblock_type *mem = &memblock.memory;
354 int i; 586 int i;
355 587
356 BUG_ON(0 == size); 588 BUG_ON(0 == size);
357 589
590 /* We align the size to limit fragmentation. Without this, a lot of
591 * small allocs quickly eat up the whole reserve array on sparc
592 */
358 size = memblock_align_up(size, align); 593 size = memblock_align_up(size, align);
359 594
595 /* We do a bottom-up search for a region with the right
596 * nid since that's easier considering how memblock_nid_range()
597 * works
598 */
360 for (i = 0; i < mem->cnt; i++) { 599 for (i = 0; i < mem->cnt; i++) {
361 u64 ret = memblock_alloc_nid_region(&mem->region[i], 600 phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
362 nid_range,
363 size, align, nid); 601 size, align, nid);
364 if (ret != ~(u64)0) 602 if (ret != MEMBLOCK_ERROR)
365 return ret; 603 return ret;
366 } 604 }
367 605
368 return memblock_alloc(size, align); 606 return 0;
369}
370
371u64 __init memblock_alloc(u64 size, u64 align)
372{
373 return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
374} 607}
375 608
376u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr) 609phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
377{ 610{
378 u64 alloc; 611 phys_addr_t res = memblock_alloc_nid(size, align, nid);
379 612
380 alloc = __memblock_alloc_base(size, align, max_addr); 613 if (res)
381 614 return res;
382 if (alloc == 0) 615 return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
383 panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
384 (unsigned long long) size, (unsigned long long) max_addr);
385
386 return alloc;
387} 616}
388 617
389u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
390{
391 long i, j;
392 u64 base = 0;
393 u64 res_base;
394
395 BUG_ON(0 == size);
396
397 size = memblock_align_up(size, align);
398
399 /* On some platforms, make sure we allocate lowmem */
400 /* Note that MEMBLOCK_REAL_LIMIT may be MEMBLOCK_ALLOC_ANYWHERE */
401 if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
402 max_addr = MEMBLOCK_REAL_LIMIT;
403 618
404 for (i = memblock.memory.cnt - 1; i >= 0; i--) { 619/*
405 u64 memblockbase = memblock.memory.region[i].base; 620 * Remaining API functions
406 u64 memblocksize = memblock.memory.region[i].size; 621 */
407
408 if (memblocksize < size)
409 continue;
410 if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
411 base = memblock_align_down(memblockbase + memblocksize - size, align);
412 else if (memblockbase < max_addr) {
413 base = min(memblockbase + memblocksize, max_addr);
414 base = memblock_align_down(base - size, align);
415 } else
416 continue;
417
418 while (base && memblockbase <= base) {
419 j = memblock_overlaps_region(&memblock.reserved, base, size);
420 if (j < 0) {
421 /* this area isn't reserved, take it */
422 if (memblock_add_region(&memblock.reserved, base, size) < 0)
423 return 0;
424 return base;
425 }
426 res_base = memblock.reserved.region[j].base;
427 if (res_base < size)
428 break;
429 base = memblock_align_down(res_base - size, align);
430 }
431 }
432 return 0;
433}
434 622
435/* You must call memblock_analyze() before this. */ 623/* You must call memblock_analyze() before this. */
436u64 __init memblock_phys_mem_size(void) 624phys_addr_t __init memblock_phys_mem_size(void)
437{ 625{
438 return memblock.memory.size; 626 return memblock.memory_size;
439} 627}
440 628
441u64 memblock_end_of_DRAM(void) 629phys_addr_t __init_memblock memblock_end_of_DRAM(void)
442{ 630{
443 int idx = memblock.memory.cnt - 1; 631 int idx = memblock.memory.cnt - 1;
444 632
445 return (memblock.memory.region[idx].base + memblock.memory.region[idx].size); 633 return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
446} 634}
447 635
448/* You must call memblock_analyze() after this. */ 636/* You must call memblock_analyze() after this. */
449void __init memblock_enforce_memory_limit(u64 memory_limit) 637void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
450{ 638{
451 unsigned long i; 639 unsigned long i;
452 u64 limit; 640 phys_addr_t limit;
453 struct memblock_property *p; 641 struct memblock_region *p;
454 642
455 if (!memory_limit) 643 if (!memory_limit)
456 return; 644 return;
@@ -458,24 +646,21 @@ void __init memblock_enforce_memory_limit(u64 memory_limit)
458 /* Truncate the memblock regions to satisfy the memory limit. */ 646 /* Truncate the memblock regions to satisfy the memory limit. */
459 limit = memory_limit; 647 limit = memory_limit;
460 for (i = 0; i < memblock.memory.cnt; i++) { 648 for (i = 0; i < memblock.memory.cnt; i++) {
461 if (limit > memblock.memory.region[i].size) { 649 if (limit > memblock.memory.regions[i].size) {
462 limit -= memblock.memory.region[i].size; 650 limit -= memblock.memory.regions[i].size;
463 continue; 651 continue;
464 } 652 }
465 653
466 memblock.memory.region[i].size = limit; 654 memblock.memory.regions[i].size = limit;
467 memblock.memory.cnt = i + 1; 655 memblock.memory.cnt = i + 1;
468 break; 656 break;
469 } 657 }
470 658
471 if (memblock.memory.region[0].size < memblock.rmo_size)
472 memblock.rmo_size = memblock.memory.region[0].size;
473
474 memory_limit = memblock_end_of_DRAM(); 659 memory_limit = memblock_end_of_DRAM();
475 660
476 /* And truncate any reserves above the limit also. */ 661 /* And truncate any reserves above the limit also. */
477 for (i = 0; i < memblock.reserved.cnt; i++) { 662 for (i = 0; i < memblock.reserved.cnt; i++) {
478 p = &memblock.reserved.region[i]; 663 p = &memblock.reserved.regions[i];
479 664
480 if (p->base > memory_limit) 665 if (p->base > memory_limit)
481 p->size = 0; 666 p->size = 0;
@@ -489,53 +674,190 @@ void __init memblock_enforce_memory_limit(u64 memory_limit)
489 } 674 }
490} 675}
491 676
492int __init memblock_is_reserved(u64 addr) 677static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
678{
679 unsigned int left = 0, right = type->cnt;
680
681 do {
682 unsigned int mid = (right + left) / 2;
683
684 if (addr < type->regions[mid].base)
685 right = mid;
686 else if (addr >= (type->regions[mid].base +
687 type->regions[mid].size))
688 left = mid + 1;
689 else
690 return mid;
691 } while (left < right);
692 return -1;
693}
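
/*
 * Editorial sketch, not part of this patch: the same binary search as
 * memblock_search() above, restated as a standalone program over a sorted
 * array of non-overlapping regions.  It returns the index of the region
 * containing addr, or -1.  The names (struct region, region_search) are
 * hypothetical.
 */
#include <stdio.h>

struct region { unsigned long long base, size; };

static int region_search(const struct region *r, int cnt,
                         unsigned long long addr)
{
        int left = 0, right = cnt;

        while (left < right) {
                int mid = left + (right - left) / 2;

                if (addr < r[mid].base)
                        right = mid;
                else if (addr >= r[mid].base + r[mid].size)
                        left = mid + 1;
                else
                        return mid;
        }
        return -1;
}

int main(void)
{
        struct region mem[] = { { 0x0, 0x1000 }, { 0x2000, 0x4000 } };

        /* 0x2fff falls in the second region, 0x1800 falls in the hole. */
        printf("%d %d\n", region_search(mem, 2, 0x2fff),
               region_search(mem, 2, 0x1800));
        return 0;
}
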
694
695int __init memblock_is_reserved(phys_addr_t addr)
696{
697 return memblock_search(&memblock.reserved, addr) != -1;
698}
699
700int __init_memblock memblock_is_memory(phys_addr_t addr)
701{
702 return memblock_search(&memblock.memory, addr) != -1;
703}
704
705int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
706{
707 int idx = memblock_search(&memblock.memory, base);
708
709 if (idx == -1)
710 return 0;
711 return memblock.memory.regions[idx].base <= base &&
712 (memblock.memory.regions[idx].base +
713 memblock.memory.regions[idx].size) >= (base + size);
714}
715
716int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
717{
718 return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
719}
720
721
722void __init_memblock memblock_set_current_limit(phys_addr_t limit)
723{
724 memblock.current_limit = limit;
725}
726
727static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
493{ 728{
729 unsigned long long base, size;
494 int i; 730 int i;
495 731
496 for (i = 0; i < memblock.reserved.cnt; i++) { 732 pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
497 u64 upper = memblock.reserved.region[i].base + 733
498 memblock.reserved.region[i].size - 1; 734 for (i = 0; i < region->cnt; i++) {
499 if ((addr >= memblock.reserved.region[i].base) && (addr <= upper)) 735 base = region->regions[i].base;
500 return 1; 736 size = region->regions[i].size;
737
738 pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
739 name, i, base, base + size - 1, size);
501 } 740 }
502 return 0;
503} 741}
504 742
505int memblock_is_region_reserved(u64 base, u64 size) 743void __init_memblock memblock_dump_all(void)
506{ 744{
507 return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; 745 if (!memblock_debug)
746 return;
747
748 pr_info("MEMBLOCK configuration:\n");
749 pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);
750
751 memblock_dump(&memblock.memory, "memory");
752 memblock_dump(&memblock.reserved, "reserved");
508} 753}
509 754
510/* 755void __init memblock_analyze(void)
511 * Given a <base, len>, find which memory regions belong to this range.
512 * Adjust the request and return a contiguous chunk.
513 */
514int memblock_find(struct memblock_property *res)
515{ 756{
516 int i; 757 int i;
517 u64 rstart, rend;
518 758
519 rstart = res->base; 759 /* Check marker in the unused last array entry */
520 rend = rstart + res->size - 1; 760 WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
761 != (phys_addr_t)RED_INACTIVE);
762 WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
763 != (phys_addr_t)RED_INACTIVE);
521 764
522 for (i = 0; i < memblock.memory.cnt; i++) { 765 memblock.memory_size = 0;
523 u64 start = memblock.memory.region[i].base;
524 u64 end = start + memblock.memory.region[i].size - 1;
525 766
526 if (start > rend) 767 for (i = 0; i < memblock.memory.cnt; i++)
527 return -1; 768 memblock.memory_size += memblock.memory.regions[i].size;
769
770 /* We allow resizing from there */
771 memblock_can_resize = 1;
772}
773
774void __init memblock_init(void)
775{
776 static int init_done __initdata = 0;
777
778 if (init_done)
779 return;
780 init_done = 1;
781
782 /* Hookup the initial arrays */
783 memblock.memory.regions = memblock_memory_init_regions;
784 memblock.memory.max = INIT_MEMBLOCK_REGIONS;
785 memblock.reserved.regions = memblock_reserved_init_regions;
786 memblock.reserved.max = INIT_MEMBLOCK_REGIONS;
787
788 /* Write a marker in the unused last array entry */
789 memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
790 memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
791
792 /* Create a dummy zero size MEMBLOCK which will get coalesced away later.
793 * This simplifies the memblock_add() code below...
794 */
795 memblock.memory.regions[0].base = 0;
796 memblock.memory.regions[0].size = 0;
797 memblock.memory.cnt = 1;
798
799 /* Ditto. */
800 memblock.reserved.regions[0].base = 0;
801 memblock.reserved.regions[0].size = 0;
802 memblock.reserved.cnt = 1;
803
804 memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
805}
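
/*
 * Editorial sketch, not part of this patch: the "poisoned spare slot" trick
 * used above.  The static arrays are sized INIT_MEMBLOCK_REGIONS + 1,
 * memblock_init() writes a known marker (RED_INACTIVE) into the extra entry,
 * and memblock_analyze() warns if it has been overwritten, which would mean
 * something walked past the end of the array.  The names and marker value
 * below (MAX_REGIONS, MARKER, check_overflow) are hypothetical stand-ins.
 */
#include <assert.h>

#define MAX_REGIONS     8
#define MARKER          0xA5A5A5A5A5A5A5A5ULL   /* stand-in poison value */

struct region { unsigned long long base, size; };

/* One spare entry at the end holds the marker and is never a real region. */
static struct region regions[MAX_REGIONS + 1] = {
        [MAX_REGIONS] = { .base = MARKER },
};

static void check_overflow(void)
{
        /* Equivalent of the WARN_ON() checks in memblock_analyze() above. */
        assert(regions[MAX_REGIONS].base == MARKER);
}

int main(void)
{
        check_overflow();
        return 0;
}
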
806
807static int __init early_memblock(char *p)
808{
809 if (p && strstr(p, "debug"))
810 memblock_debug = 1;
811 return 0;
812}
813early_param("memblock", early_memblock);
814
815#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)
816
817static int memblock_debug_show(struct seq_file *m, void *private)
818{
819 struct memblock_type *type = m->private;
820 struct memblock_region *reg;
821 int i;
822
823 for (i = 0; i < type->cnt; i++) {
824 reg = &type->regions[i];
825 seq_printf(m, "%4d: ", i);
826 if (sizeof(phys_addr_t) == 4)
827 seq_printf(m, "0x%08lx..0x%08lx\n",
828 (unsigned long)reg->base,
829 (unsigned long)(reg->base + reg->size - 1));
830 else
831 seq_printf(m, "0x%016llx..0x%016llx\n",
832 (unsigned long long)reg->base,
833 (unsigned long long)(reg->base + reg->size - 1));
528 834
529 if ((end >= rstart) && (start < rend)) {
530 /* adjust the request */
531 if (rstart < start)
532 rstart = start;
533 if (rend > end)
534 rend = end;
535 res->base = rstart;
536 res->size = rend - rstart + 1;
537 return 0;
538 }
539 } 835 }
540 return -1; 836 return 0;
837}
838
839static int memblock_debug_open(struct inode *inode, struct file *file)
840{
841 return single_open(file, memblock_debug_show, inode->i_private);
541} 842}
843
844static const struct file_operations memblock_debug_fops = {
845 .open = memblock_debug_open,
846 .read = seq_read,
847 .llseek = seq_lseek,
848 .release = single_release,
849};
850
851static int __init memblock_init_debugfs(void)
852{
853 struct dentry *root = debugfs_create_dir("memblock", NULL);
854 if (!root)
855 return -ENXIO;
856 debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
857 debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
858
859 return 0;
860}
861__initcall(memblock_init_debugfs);
862
863#endif /* CONFIG_DEBUG_FS */