Diffstat (limited to 'mm/memblock.c')
-rw-r--r-- | mm/memblock.c | 970
1 files changed, 538 insertions, 432 deletions
diff --git a/mm/memblock.c b/mm/memblock.c
index ccbf97339592..2f55f19b7c86 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -20,12 +20,23 @@ | |||
20 | #include <linux/seq_file.h> | 20 | #include <linux/seq_file.h> |
21 | #include <linux/memblock.h> | 21 | #include <linux/memblock.h> |
22 | 22 | ||
23 | struct memblock memblock __initdata_memblock; | 23 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; |
24 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; | ||
25 | |||
26 | struct memblock memblock __initdata_memblock = { | ||
27 | .memory.regions = memblock_memory_init_regions, | ||
28 | .memory.cnt = 1, /* empty dummy entry */ | ||
29 | .memory.max = INIT_MEMBLOCK_REGIONS, | ||
30 | |||
31 | .reserved.regions = memblock_reserved_init_regions, | ||
32 | .reserved.cnt = 1, /* empty dummy entry */ | ||
33 | .reserved.max = INIT_MEMBLOCK_REGIONS, | ||
34 | |||
35 | .current_limit = MEMBLOCK_ALLOC_ANYWHERE, | ||
36 | }; | ||
24 | 37 | ||
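The hunk above replaces the old run-time memblock_init() (removed further down in this patch) with a compile-time initializer: both region arrays start out with a single empty "dummy" entry (.cnt = 1, size 0), which the add path later overwrites instead of special-casing an uninitialized array. A minimal standalone sketch of the same pattern, using simplified types rather than the real memblock structures:

```c
#include <stdio.h>

#define INIT_REGIONS 4

struct region { unsigned long base, size; };

struct region_array {
	struct region *regions;
	unsigned long cnt;	/* number of valid entries, never below 1 */
	unsigned long max;	/* capacity of the backing array */
};

static struct region static_regions[INIT_REGIONS];

/* compile-time setup: one empty dummy entry, like memblock's .cnt = 1 */
static struct region_array memory = {
	.regions = static_regions,
	.cnt = 1,
	.max = INIT_REGIONS,
};

/* toy add: no overlap, merge or capacity handling, just the dummy-entry case */
static void add(struct region_array *a, unsigned long base, unsigned long size)
{
	if (a->regions[0].size == 0) {		/* still the dummy entry? */
		a->regions[0].base = base;	/* overwrite it in place */
		a->regions[0].size = size;
		return;
	}
	a->regions[a->cnt].base = base;
	a->regions[a->cnt].size = size;
	a->cnt++;
}

int main(void)
{
	add(&memory, 0x1000, 0x1000);	/* works without any runtime init call */
	add(&memory, 0x4000, 0x2000);
	for (unsigned long i = 0; i < memory.cnt; i++)
		printf("[%#lx-%#lx)\n", memory.regions[i].base,
		       memory.regions[i].base + memory.regions[i].size);
	return 0;
}
```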
25 | int memblock_debug __initdata_memblock; | 38 | int memblock_debug __initdata_memblock; |
26 | int memblock_can_resize __initdata_memblock; | 39 | static int memblock_can_resize __initdata_memblock; |
27 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock; | ||
28 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock; | ||
29 | 40 | ||
30 | /* inline so we don't get a warning when pr_debug is compiled out */ | 41 | /* inline so we don't get a warning when pr_debug is compiled out */ |
31 | static inline const char *memblock_type_name(struct memblock_type *type) | 42 | static inline const char *memblock_type_name(struct memblock_type *type) |
@@ -38,27 +49,23 @@ static inline const char *memblock_type_name(struct memblock_type *type) | |||
38 | return "unknown"; | 49 | return "unknown"; |
39 | } | 50 | } |
40 | 51 | ||
41 | /* | 52 | /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */ |
42 | * Address comparison utilities | 53 | static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size) |
43 | */ | ||
44 | |||
45 | static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size) | ||
46 | { | ||
47 | return addr & ~(size - 1); | ||
48 | } | ||
49 | |||
50 | static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size) | ||
51 | { | 54 | { |
52 | return (addr + (size - 1)) & ~(size - 1); | 55 | return *size = min(*size, (phys_addr_t)ULLONG_MAX - base); |
53 | } | 56 | } |
54 | 57 | ||
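The new memblock_cap_size() helper clamps *@size so that @base + *@size cannot wrap past the top of the address space; callers then compute end = base + memblock_cap_size(base, &size) safely. A standalone sketch of the same clamp with a plain 64-bit type (an illustration, not the kernel code):

```c
#include <stdint.h>
#include <stdio.h>

/* clamp *size so that base + *size does not overflow; return the new size */
static uint64_t cap_size(uint64_t base, uint64_t *size)
{
	uint64_t max = UINT64_MAX - base;

	if (*size > max)
		*size = max;
	return *size;
}

int main(void)
{
	uint64_t base = 0xfffffffffffff000ULL;
	uint64_t size = 0x10000;	/* would wrap past UINT64_MAX */
	uint64_t end  = base + cap_size(base, &size);

	printf("size capped to %#llx, end = %#llx\n",
	       (unsigned long long)size, (unsigned long long)end);
	return 0;
}
```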
58 | /* | ||
59 | * Address comparison utilities | ||
60 | */ | ||
55 | static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, | 61 | static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, |
56 | phys_addr_t base2, phys_addr_t size2) | 62 | phys_addr_t base2, phys_addr_t size2) |
57 | { | 63 | { |
58 | return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); | 64 | return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); |
59 | } | 65 | } |
60 | 66 | ||
61 | long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) | 67 | static long __init_memblock memblock_overlaps_region(struct memblock_type *type, |
68 | phys_addr_t base, phys_addr_t size) | ||
62 | { | 69 | { |
63 | unsigned long i; | 70 | unsigned long i; |
64 | 71 | ||
@@ -72,83 +79,66 @@ long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_a | |||
72 | return (i < type->cnt) ? i : -1; | 79 | return (i < type->cnt) ? i : -1; |
73 | } | 80 | } |
74 | 81 | ||
75 | /* | 82 | /** |
76 | * Find, allocate, deallocate or reserve unreserved regions. All allocations | 83 | * memblock_find_in_range_node - find free area in given range and node |
77 | * are top-down. | 84 | * @start: start of candidate range |
85 | * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} | ||
86 | * @size: size of free area to find | ||
87 | * @align: alignment of free area to find | ||
88 | * @nid: nid of the free area to find, %MAX_NUMNODES for any node | ||
89 | * | ||
90 | * Find @size free area aligned to @align in the specified range and node. | ||
91 | * | ||
92 | * RETURNS: | ||
93 | * Found address on success, %0 on failure. | ||
78 | */ | 94 | */ |
79 | 95 | phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start, | |
80 | static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end, | 96 | phys_addr_t end, phys_addr_t size, |
81 | phys_addr_t size, phys_addr_t align) | 97 | phys_addr_t align, int nid) |
82 | { | 98 | { |
83 | phys_addr_t base, res_base; | 99 | phys_addr_t this_start, this_end, cand; |
84 | long j; | 100 | u64 i; |
85 | |||
86 | /* In case, huge size is requested */ | ||
87 | if (end < size) | ||
88 | return MEMBLOCK_ERROR; | ||
89 | |||
90 | base = memblock_align_down((end - size), align); | ||
91 | 101 | ||
92 | /* Prevent allocations returning 0 as it's also used to | 102 | /* align @size to avoid excessive fragmentation on reserved array */ |
93 | * indicate an allocation failure | 103 | size = round_up(size, align); |
94 | */ | ||
95 | if (start == 0) | ||
96 | start = PAGE_SIZE; | ||
97 | |||
98 | while (start <= base) { | ||
99 | j = memblock_overlaps_region(&memblock.reserved, base, size); | ||
100 | if (j < 0) | ||
101 | return base; | ||
102 | res_base = memblock.reserved.regions[j].base; | ||
103 | if (res_base < size) | ||
104 | break; | ||
105 | base = memblock_align_down(res_base - size, align); | ||
106 | } | ||
107 | 104 | ||
108 | return MEMBLOCK_ERROR; | 105 | /* pump up @end */ |
109 | } | ||
110 | |||
111 | static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size, | ||
112 | phys_addr_t align, phys_addr_t start, phys_addr_t end) | ||
113 | { | ||
114 | long i; | ||
115 | |||
116 | BUG_ON(0 == size); | ||
117 | |||
118 | /* Pump up max_addr */ | ||
119 | if (end == MEMBLOCK_ALLOC_ACCESSIBLE) | 106 | if (end == MEMBLOCK_ALLOC_ACCESSIBLE) |
120 | end = memblock.current_limit; | 107 | end = memblock.current_limit; |
121 | 108 | ||
122 | /* We do a top-down search, this tends to limit memory | 109 | /* adjust @start to avoid underflow and allocating the first page */ |
123 | * fragmentation by keeping early boot allocs near the | 110 | start = max3(start, size, (phys_addr_t)PAGE_SIZE); |
124 | * top of memory | 111 | end = max(start, end); |
125 | */ | ||
126 | for (i = memblock.memory.cnt - 1; i >= 0; i--) { | ||
127 | phys_addr_t memblockbase = memblock.memory.regions[i].base; | ||
128 | phys_addr_t memblocksize = memblock.memory.regions[i].size; | ||
129 | phys_addr_t bottom, top, found; | ||
130 | 112 | ||
131 | if (memblocksize < size) | 113 | for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) { |
132 | continue; | 114 | this_start = clamp(this_start, start, end); |
133 | if ((memblockbase + memblocksize) <= start) | 115 | this_end = clamp(this_end, start, end); |
134 | break; | 116 | |
135 | bottom = max(memblockbase, start); | 117 | cand = round_down(this_end - size, align); |
136 | top = min(memblockbase + memblocksize, end); | 118 | if (cand >= this_start) |
137 | if (bottom >= top) | 119 | return cand; |
138 | continue; | ||
139 | found = memblock_find_region(bottom, top, size, align); | ||
140 | if (found != MEMBLOCK_ERROR) | ||
141 | return found; | ||
142 | } | 120 | } |
143 | return MEMBLOCK_ERROR; | 121 | return 0; |
144 | } | 122 | } |
145 | 123 | ||
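memblock_find_in_range_node() above replaces the old memblock_find_region()/memblock_find_base() pair: it walks free ranges from the top of memory down, clips each one into the caller's [@start, @end) window, and takes round_down(this_end - size, align) as the candidate, accepting it only if it still lies inside the clipped range. A standalone sketch of that candidate computation over a plain array of free ranges (hypothetical helper names and example values, not the kernel's for_each_free_mem_range_reverse() iterator):

```c
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };	/* [start, end) */

static uint64_t clamp(uint64_t v, uint64_t lo, uint64_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static uint64_t round_down(uint64_t v, uint64_t align)
{
	return v & ~(align - 1);	/* align must be a power of two */
}

/* top-down search: free ranges assumed sorted by address, walked in reverse */
static uint64_t find_top_down(const struct range *free, int nr,
			      uint64_t start, uint64_t end,
			      uint64_t size, uint64_t align)
{
	for (int i = nr - 1; i >= 0; i--) {
		uint64_t this_start = clamp(free[i].start, start, end);
		uint64_t this_end   = clamp(free[i].end, start, end);

		if (this_end < size)
			continue;	/* window too small, avoid underflow */

		uint64_t cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;	/* highest suitable address */
	}
	return 0;			/* 0 means failure, as in memblock */
}

int main(void)
{
	struct range free[] = { { 0x1000, 0x8000 }, { 0x100000, 0x140000 } };
	uint64_t got = find_top_down(free, 2, 0, 0x200000, 0x2000, 0x1000);

	printf("allocated at %#llx\n", (unsigned long long)got);  /* 0x13e000 */
	return 0;
}
```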
146 | /* | 124 | /** |
147 | * Find a free area with specified alignment in a specific range. | 125 | * memblock_find_in_range - find free area in given range |
126 | * @start: start of candidate range | ||
127 | * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} | ||
128 | * @size: size of free area to find | ||
129 | * @align: alignment of free area to find | ||
130 | * | ||
131 | * Find @size free area aligned to @align in the specified range. | ||
132 | * | ||
133 | * RETURNS: | ||
134 | * Found address on success, %0 on failure. | ||
148 | */ | 135 | */ |
149 | u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align) | 136 | phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, |
137 | phys_addr_t end, phys_addr_t size, | ||
138 | phys_addr_t align) | ||
150 | { | 139 | { |
151 | return memblock_find_base(size, align, start, end); | 140 | return memblock_find_in_range_node(start, end, size, align, |
141 | MAX_NUMNODES); | ||
152 | } | 142 | } |
153 | 143 | ||
154 | /* | 144 | /* |
@@ -177,25 +167,21 @@ int __init_memblock memblock_reserve_reserved_regions(void) | |||
177 | 167 | ||
178 | static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) | 168 | static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) |
179 | { | 169 | { |
180 | unsigned long i; | 170 | type->total_size -= type->regions[r].size; |
181 | 171 | memmove(&type->regions[r], &type->regions[r + 1], | |
182 | for (i = r; i < type->cnt - 1; i++) { | 172 | (type->cnt - (r + 1)) * sizeof(type->regions[r])); |
183 | type->regions[i].base = type->regions[i + 1].base; | ||
184 | type->regions[i].size = type->regions[i + 1].size; | ||
185 | } | ||
186 | type->cnt--; | 173 | type->cnt--; |
187 | 174 | ||
188 | /* Special case for empty arrays */ | 175 | /* Special case for empty arrays */ |
189 | if (type->cnt == 0) { | 176 | if (type->cnt == 0) { |
177 | WARN_ON(type->total_size != 0); | ||
190 | type->cnt = 1; | 178 | type->cnt = 1; |
191 | type->regions[0].base = 0; | 179 | type->regions[0].base = 0; |
192 | type->regions[0].size = 0; | 180 | type->regions[0].size = 0; |
181 | memblock_set_region_node(&type->regions[0], MAX_NUMNODES); | ||
193 | } | 182 | } |
194 | } | 183 | } |
195 | 184 | ||
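memblock_remove_region() now compacts the array with a single memmove() over the tail instead of copying entries one by one, and keeps the new total_size field in sync. A minimal standalone version of that compaction:

```c
#include <stdio.h>
#include <string.h>

struct region { unsigned long base, size; };

static struct region regions[] = {
	{ 0x1000, 0x1000 }, { 0x3000, 0x1000 }, { 0x5000, 0x1000 },
};
static unsigned long cnt = 3, total_size = 0x3000;

/* drop entry @r by sliding the tail of the array down over it */
static void remove_region(unsigned long r)
{
	total_size -= regions[r].size;
	memmove(&regions[r], &regions[r + 1],
		(cnt - (r + 1)) * sizeof(regions[r]));
	cnt--;
}

int main(void)
{
	remove_region(1);	/* drop [0x3000, 0x4000) */
	for (unsigned long i = 0; i < cnt; i++)
		printf("[%#lx-%#lx)\n", regions[i].base,
		       regions[i].base + regions[i].size);
	printf("total_size = %#lx\n", total_size);	/* 0x2000 */
	return 0;
}
```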
196 | /* Defined below but needed now */ | ||
197 | static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size); | ||
198 | |||
199 | static int __init_memblock memblock_double_array(struct memblock_type *type) | 185 | static int __init_memblock memblock_double_array(struct memblock_type *type) |
200 | { | 186 | { |
201 | struct memblock_region *new_array, *old_array; | 187 | struct memblock_region *new_array, *old_array; |
@@ -225,10 +211,10 @@ static int __init_memblock memblock_double_array(struct memblock_type *type) | |||
225 | */ | 211 | */ |
226 | if (use_slab) { | 212 | if (use_slab) { |
227 | new_array = kmalloc(new_size, GFP_KERNEL); | 213 | new_array = kmalloc(new_size, GFP_KERNEL); |
228 | addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array); | 214 | addr = new_array ? __pa(new_array) : 0; |
229 | } else | 215 | } else |
230 | addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE); | 216 | addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t)); |
231 | if (addr == MEMBLOCK_ERROR) { | 217 | if (!addr) { |
232 | pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", | 218 | pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", |
233 | memblock_type_name(type), type->max, type->max * 2); | 219 | memblock_type_name(type), type->max, type->max * 2); |
234 | return -1; | 220 | return -1; |
@@ -253,7 +239,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type) | |||
253 | return 0; | 239 | return 0; |
254 | 240 | ||
255 | /* Add the new reserved region now. Should not fail ! */ | 241 | /* Add the new reserved region now. Should not fail ! */ |
256 | BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size)); | 242 | BUG_ON(memblock_reserve(addr, new_size)); |
257 | 243 | ||
258 | /* If the array wasn't our static init one, then free it. We only do | 244 | /* If the array wasn't our static init one, then free it. We only do |
259 | * that before SLAB is available as later on, we don't know whether | 245 | * that before SLAB is available as later on, we don't know whether |
@@ -267,343 +253,514 @@ static int __init_memblock memblock_double_array(struct memblock_type *type) | |||
267 | return 0; | 253 | return 0; |
268 | } | 254 | } |
269 | 255 | ||
270 | extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, | 256 | /** |
271 | phys_addr_t addr2, phys_addr_t size2) | 257 | * memblock_merge_regions - merge neighboring compatible regions |
258 | * @type: memblock type to scan | ||
259 | * | ||
260 | * Scan @type and merge neighboring compatible regions. | ||
261 | */ | ||
262 | static void __init_memblock memblock_merge_regions(struct memblock_type *type) | ||
272 | { | 263 | { |
273 | return 1; | 264 | int i = 0; |
274 | } | ||
275 | 265 | ||
276 | static long __init_memblock memblock_add_region(struct memblock_type *type, | 266 | /* cnt never goes below 1 */ |
277 | phys_addr_t base, phys_addr_t size) | 267 | while (i < type->cnt - 1) { |
278 | { | 268 | struct memblock_region *this = &type->regions[i]; |
279 | phys_addr_t end = base + size; | 269 | struct memblock_region *next = &type->regions[i + 1]; |
280 | int i, slot = -1; | ||
281 | 270 | ||
282 | /* First try and coalesce this MEMBLOCK with others */ | 271 | if (this->base + this->size != next->base || |
283 | for (i = 0; i < type->cnt; i++) { | 272 | memblock_get_region_node(this) != |
284 | struct memblock_region *rgn = &type->regions[i]; | 273 | memblock_get_region_node(next)) { |
285 | phys_addr_t rend = rgn->base + rgn->size; | 274 | BUG_ON(this->base + this->size > next->base); |
286 | 275 | i++; | |
287 | /* Exit if there's no possible hits */ | 276 | continue; |
288 | if (rgn->base > end || rgn->size == 0) | 277 | } |
289 | break; | ||
290 | |||
291 | /* Check if we are fully enclosed within an existing | ||
292 | * block | ||
293 | */ | ||
294 | if (rgn->base <= base && rend >= end) | ||
295 | return 0; | ||
296 | 278 | ||
297 | /* Check if we overlap or are adjacent with the bottom | 279 | this->size += next->size; |
298 | * of a block. | 280 | memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next)); |
299 | */ | 281 | type->cnt--; |
300 | if (base < rgn->base && end >= rgn->base) { | 282 | } |
301 | /* If we can't coalesce, create a new block */ | 283 | } |
302 | if (!memblock_memory_can_coalesce(base, size, | ||
303 | rgn->base, | ||
304 | rgn->size)) { | ||
305 | /* Overlap & can't coalesce are mutually | ||
306 | * exclusive, if you do that, be prepared | ||
307 | * for trouble | ||
308 | */ | ||
309 | WARN_ON(end != rgn->base); | ||
310 | goto new_block; | ||
311 | } | ||
312 | /* We extend the bottom of the block down to our | ||
313 | * base | ||
314 | */ | ||
315 | rgn->base = base; | ||
316 | rgn->size = rend - base; | ||
317 | 284 | ||
318 | /* Return if we have nothing else to allocate | 285 | /** |
319 | * (fully coalesced) | 286 | * memblock_insert_region - insert new memblock region |
320 | */ | 287 | * @type: memblock type to insert into |
321 | if (rend >= end) | 288 | * @idx: index for the insertion point |
322 | return 0; | 289 | * @base: base address of the new region |
290 | * @size: size of the new region | ||
291 | * | ||
292 | * Insert new memblock region [@base,@base+@size) into @type at @idx. | ||
293 | * @type must already have extra room to accomodate the new region. | ||
294 | */ | ||
295 | static void __init_memblock memblock_insert_region(struct memblock_type *type, | ||
296 | int idx, phys_addr_t base, | ||
297 | phys_addr_t size, int nid) | ||
298 | { | ||
299 | struct memblock_region *rgn = &type->regions[idx]; | ||
323 | 300 | ||
324 | /* We continue processing from the end of the | 301 | BUG_ON(type->cnt >= type->max); |
325 | * coalesced block. | 302 | memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn)); |
326 | */ | 303 | rgn->base = base; |
327 | base = rend; | 304 | rgn->size = size; |
328 | size = end - base; | 305 | memblock_set_region_node(rgn, nid); |
329 | } | 306 | type->cnt++; |
307 | type->total_size += size; | ||
308 | } | ||
330 | 309 | ||
331 | /* Now check if we overlap or are adjacent with the | 310 | /** |
332 | * top of a block | 311 | * memblock_add_region - add new memblock region |
333 | */ | 312 | * @type: memblock type to add new region into |
334 | if (base <= rend && end >= rend) { | 313 | * @base: base address of the new region |
335 | /* If we can't coalesce, create a new block */ | 314 | * @size: size of the new region |
336 | if (!memblock_memory_can_coalesce(rgn->base, | 315 | * @nid: nid of the new region |
337 | rgn->size, | 316 | * |
338 | base, size)) { | 317 | * Add new memblock region [@base,@base+@size) into @type. The new region |
339 | /* Overlap & can't coalesce are mutually | 318 | * is allowed to overlap with existing ones - overlaps don't affect already |
340 | * exclusive, if you do that, be prepared | 319 | * existing regions. @type is guaranteed to be minimal (all neighbouring |
341 | * for trouble | 320 | * compatible regions are merged) after the addition. |
342 | */ | 321 | * |
343 | WARN_ON(rend != base); | 322 | * RETURNS: |
344 | goto new_block; | 323 | * 0 on success, -errno on failure. |
345 | } | 324 | */ |
346 | /* We adjust our base down to enclose the | 325 | static int __init_memblock memblock_add_region(struct memblock_type *type, |
347 | * original block and destroy it. It will be | 326 | phys_addr_t base, phys_addr_t size, int nid) |
348 | * part of our new allocation. Since we've | 327 | { |
349 | * freed an entry, we know we won't fail | 328 | bool insert = false; |
350 | * to allocate one later, so we won't risk | 329 | phys_addr_t obase = base; |
351 | * losing the original block allocation. | 330 | phys_addr_t end = base + memblock_cap_size(base, &size); |
352 | */ | 331 | int i, nr_new; |
353 | size += (base - rgn->base); | ||
354 | base = rgn->base; | ||
355 | memblock_remove_region(type, i--); | ||
356 | } | ||
357 | } | ||
358 | 332 | ||
359 | /* If the array is empty, special case, replace the fake | 333 | /* special case for empty array */ |
360 | * filler region and return | 334 | if (type->regions[0].size == 0) { |
361 | */ | 335 | WARN_ON(type->cnt != 1 || type->total_size); |
362 | if ((type->cnt == 1) && (type->regions[0].size == 0)) { | ||
363 | type->regions[0].base = base; | 336 | type->regions[0].base = base; |
364 | type->regions[0].size = size; | 337 | type->regions[0].size = size; |
338 | memblock_set_region_node(&type->regions[0], nid); | ||
339 | type->total_size = size; | ||
365 | return 0; | 340 | return 0; |
366 | } | 341 | } |
367 | 342 | repeat: | |
368 | new_block: | 343 | /* |
369 | /* If we are out of space, we fail. It's too late to resize the array | 344 | * The following is executed twice. Once with %false @insert and |
370 | * but then this shouldn't have happened in the first place. | 345 | * then with %true. The first counts the number of regions needed |
346 | * to accomodate the new area. The second actually inserts them. | ||
371 | */ | 347 | */ |
372 | if (WARN_ON(type->cnt >= type->max)) | 348 | base = obase; |
373 | return -1; | 349 | nr_new = 0; |
374 | 350 | ||
375 | /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */ | 351 | for (i = 0; i < type->cnt; i++) { |
376 | for (i = type->cnt - 1; i >= 0; i--) { | 352 | struct memblock_region *rgn = &type->regions[i]; |
377 | if (base < type->regions[i].base) { | 353 | phys_addr_t rbase = rgn->base; |
378 | type->regions[i+1].base = type->regions[i].base; | 354 | phys_addr_t rend = rbase + rgn->size; |
379 | type->regions[i+1].size = type->regions[i].size; | 355 | |
380 | } else { | 356 | if (rbase >= end) |
381 | type->regions[i+1].base = base; | ||
382 | type->regions[i+1].size = size; | ||
383 | slot = i + 1; | ||
384 | break; | 357 | break; |
358 | if (rend <= base) | ||
359 | continue; | ||
360 | /* | ||
361 | * @rgn overlaps. If it separates the lower part of new | ||
362 | * area, insert that portion. | ||
363 | */ | ||
364 | if (rbase > base) { | ||
365 | nr_new++; | ||
366 | if (insert) | ||
367 | memblock_insert_region(type, i++, base, | ||
368 | rbase - base, nid); | ||
385 | } | 369 | } |
370 | /* area below @rend is dealt with, forget about it */ | ||
371 | base = min(rend, end); | ||
386 | } | 372 | } |
387 | if (base < type->regions[0].base) { | 373 | |
388 | type->regions[0].base = base; | 374 | /* insert the remaining portion */ |
389 | type->regions[0].size = size; | 375 | if (base < end) { |
390 | slot = 0; | 376 | nr_new++; |
377 | if (insert) | ||
378 | memblock_insert_region(type, i, base, end - base, nid); | ||
391 | } | 379 | } |
392 | type->cnt++; | ||
393 | 380 | ||
394 | /* The array is full ? Try to resize it. If that fails, we undo | 381 | /* |
395 | * our allocation and return an error | 382 | * If this was the first round, resize array and repeat for actual |
383 | * insertions; otherwise, merge and return. | ||
396 | */ | 384 | */ |
397 | if (type->cnt == type->max && memblock_double_array(type)) { | 385 | if (!insert) { |
398 | BUG_ON(slot < 0); | 386 | while (type->cnt + nr_new > type->max) |
399 | memblock_remove_region(type, slot); | 387 | if (memblock_double_array(type) < 0) |
400 | return -1; | 388 | return -ENOMEM; |
389 | insert = true; | ||
390 | goto repeat; | ||
391 | } else { | ||
392 | memblock_merge_regions(type); | ||
393 | return 0; | ||
401 | } | 394 | } |
402 | |||
403 | return 0; | ||
404 | } | 395 | } |
405 | 396 | ||
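memblock_add_region() above is the heart of the rewrite: the overlap walk runs twice, first with insert == false only to count how many new regions the request would create (growing the array if needed), then with insert == true to actually place them, followed by a merge pass. A compact standalone sketch of the same two-pass scheme over a sorted interval list (simplified: no node ids, a fixed-capacity check instead of memblock_double_array(), and the final merge omitted):

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_REGIONS 8

struct region { unsigned long base, size; };

static struct region regions[MAX_REGIONS];
static int cnt;

static void insert_at(int idx, unsigned long base, unsigned long size)
{
	memmove(&regions[idx + 1], &regions[idx],
		(cnt - idx) * sizeof(regions[0]));
	regions[idx].base = base;
	regions[idx].size = size;
	cnt++;
}

/* two-pass add: first count the pieces needed, then actually insert them */
static int add_region(unsigned long base, unsigned long size)
{
	unsigned long obase = base, end = base + size;
	bool insert = false;
	int i, nr_new;

repeat:
	base = obase;
	nr_new = 0;

	for (i = 0; i < cnt; i++) {
		unsigned long rbase = regions[i].base;
		unsigned long rend  = rbase + regions[i].size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		if (rbase > base) {		/* uncovered gap below this region */
			nr_new++;
			if (insert)
				insert_at(i++, base, rbase - base);
		}
		base = rend < end ? rend : end;	/* everything below rend is done */
	}
	if (base < end) {			/* remaining tail above all regions */
		nr_new++;
		if (insert)
			insert_at(i, base, end - base);
	}

	if (!insert) {
		if (cnt + nr_new > MAX_REGIONS)	/* the kernel doubles the array here */
			return -1;
		insert = true;
		goto repeat;
	}
	return 0;				/* (merge of adjacent pieces omitted) */
}

int main(void)
{
	add_region(0x2000, 0x1000);		/* [0x2000, 0x3000) */
	add_region(0x1000, 0x4000);		/* overlaps: two new pieces added */
	for (int i = 0; i < cnt; i++)
		printf("[%#lx-%#lx)\n", regions[i].base,
		       regions[i].base + regions[i].size);
	return 0;
}
```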
406 | long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) | 397 | int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size, |
398 | int nid) | ||
407 | { | 399 | { |
408 | return memblock_add_region(&memblock.memory, base, size); | 400 | return memblock_add_region(&memblock.memory, base, size, nid); |
401 | } | ||
409 | 402 | ||
403 | int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) | ||
404 | { | ||
405 | return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES); | ||
410 | } | 406 | } |
411 | 407 | ||
412 | static long __init_memblock __memblock_remove(struct memblock_type *type, | 408 | /** |
413 | phys_addr_t base, phys_addr_t size) | 409 | * memblock_isolate_range - isolate given range into disjoint memblocks |
410 | * @type: memblock type to isolate range for | ||
411 | * @base: base of range to isolate | ||
412 | * @size: size of range to isolate | ||
413 | * @start_rgn: out parameter for the start of isolated region | ||
414 | * @end_rgn: out parameter for the end of isolated region | ||
415 | * | ||
416 | * Walk @type and ensure that regions don't cross the boundaries defined by | ||
417 | * [@base,@base+@size). Crossing regions are split at the boundaries, | ||
418 | * which may create at most two more regions. The index of the first | ||
419 | * region inside the range is returned in *@start_rgn and end in *@end_rgn. | ||
420 | * | ||
421 | * RETURNS: | ||
422 | * 0 on success, -errno on failure. | ||
423 | */ | ||
424 | static int __init_memblock memblock_isolate_range(struct memblock_type *type, | ||
425 | phys_addr_t base, phys_addr_t size, | ||
426 | int *start_rgn, int *end_rgn) | ||
414 | { | 427 | { |
415 | phys_addr_t end = base + size; | 428 | phys_addr_t end = base + memblock_cap_size(base, &size); |
416 | int i; | 429 | int i; |
417 | 430 | ||
418 | /* Walk through the array for collisions */ | 431 | *start_rgn = *end_rgn = 0; |
432 | |||
433 | /* we'll create at most two more regions */ | ||
434 | while (type->cnt + 2 > type->max) | ||
435 | if (memblock_double_array(type) < 0) | ||
436 | return -ENOMEM; | ||
437 | |||
419 | for (i = 0; i < type->cnt; i++) { | 438 | for (i = 0; i < type->cnt; i++) { |
420 | struct memblock_region *rgn = &type->regions[i]; | 439 | struct memblock_region *rgn = &type->regions[i]; |
421 | phys_addr_t rend = rgn->base + rgn->size; | 440 | phys_addr_t rbase = rgn->base; |
441 | phys_addr_t rend = rbase + rgn->size; | ||
422 | 442 | ||
423 | /* Nothing more to do, exit */ | 443 | if (rbase >= end) |
424 | if (rgn->base > end || rgn->size == 0) | ||
425 | break; | 444 | break; |
426 | 445 | if (rend <= base) | |
427 | /* If we fully enclose the block, drop it */ | ||
428 | if (base <= rgn->base && end >= rend) { | ||
429 | memblock_remove_region(type, i--); | ||
430 | continue; | 446 | continue; |
431 | } | ||
432 | 447 | ||
433 | /* If we are fully enclosed within a block | 448 | if (rbase < base) { |
434 | * then we need to split it and we are done | 449 | /* |
435 | */ | 450 | * @rgn intersects from below. Split and continue |
436 | if (base > rgn->base && end < rend) { | 451 | * to process the next region - the new top half. |
437 | rgn->size = base - rgn->base; | 452 | */ |
438 | if (!memblock_add_region(type, end, rend - end)) | 453 | rgn->base = base; |
439 | return 0; | 454 | rgn->size -= base - rbase; |
440 | /* Failure to split is bad, we at least | 455 | type->total_size -= base - rbase; |
441 | * restore the block before erroring | 456 | memblock_insert_region(type, i, rbase, base - rbase, |
457 | memblock_get_region_node(rgn)); | ||
458 | } else if (rend > end) { | ||
459 | /* | ||
460 | * @rgn intersects from above. Split and redo the | ||
461 | * current region - the new bottom half. | ||
442 | */ | 462 | */ |
443 | rgn->size = rend - rgn->base; | ||
444 | WARN_ON(1); | ||
445 | return -1; | ||
446 | } | ||
447 | |||
448 | /* Check if we need to trim the bottom of a block */ | ||
449 | if (rgn->base < end && rend > end) { | ||
450 | rgn->size -= end - rgn->base; | ||
451 | rgn->base = end; | 463 | rgn->base = end; |
452 | break; | 464 | rgn->size -= end - rbase; |
465 | type->total_size -= end - rbase; | ||
466 | memblock_insert_region(type, i--, rbase, end - rbase, | ||
467 | memblock_get_region_node(rgn)); | ||
468 | } else { | ||
469 | /* @rgn is fully contained, record it */ | ||
470 | if (!*end_rgn) | ||
471 | *start_rgn = i; | ||
472 | *end_rgn = i + 1; | ||
453 | } | 473 | } |
474 | } | ||
454 | 475 | ||
455 | /* And check if we need to trim the top of a block */ | 476 | return 0; |
456 | if (base < rend) | 477 | } |
457 | rgn->size -= rend - base; | ||
458 | 478 | ||
459 | } | 479 | static int __init_memblock __memblock_remove(struct memblock_type *type, |
480 | phys_addr_t base, phys_addr_t size) | ||
481 | { | ||
482 | int start_rgn, end_rgn; | ||
483 | int i, ret; | ||
484 | |||
485 | ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); | ||
486 | if (ret) | ||
487 | return ret; | ||
488 | |||
489 | for (i = end_rgn - 1; i >= start_rgn; i--) | ||
490 | memblock_remove_region(type, i); | ||
460 | return 0; | 491 | return 0; |
461 | } | 492 | } |
462 | 493 | ||
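Removal is now layered on memblock_isolate_range(): regions that cross the [@base, @base+@size) boundaries are first split exactly at those boundaries (creating at most two extra entries), and the fully contained regions reported in [*start_rgn, *end_rgn) are then dropped in one loop, as __memblock_remove() above does. A standalone sketch of that isolate-then-remove pattern (simplified types, no node ids, no capacity growth):

```c
#include <stdio.h>
#include <string.h>

#define MAX_REGIONS 16

struct region { unsigned long base, size; };

static struct region regions[MAX_REGIONS];
static int cnt;

static void insert_at(int idx, unsigned long base, unsigned long size)
{
	memmove(&regions[idx + 1], &regions[idx],
		(cnt - idx) * sizeof(regions[0]));
	regions[idx].base = base;
	regions[idx].size = size;
	cnt++;
}

/*
 * Split any region crossing [base, base+size) at the boundaries and report
 * the index range [*start_rgn, *end_rgn) of regions now fully inside it.
 */
static void isolate_range(unsigned long base, unsigned long size,
			  int *start_rgn, int *end_rgn)
{
	unsigned long end = base + size;

	*start_rgn = *end_rgn = 0;
	for (int i = 0; i < cnt; i++) {
		unsigned long rbase = regions[i].base;
		unsigned long rend  = rbase + regions[i].size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		if (rbase < base) {		/* intersects from below: split */
			regions[i].base = base;
			regions[i].size = rend - base;
			insert_at(i, rbase, base - rbase);
		} else if (rend > end) {	/* intersects from above: split */
			regions[i].base = end;
			regions[i].size = rend - end;
			insert_at(i--, rbase, end - rbase);
		} else {			/* fully contained: record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}
}

static void remove_range(unsigned long base, unsigned long size)
{
	int start_rgn, end_rgn;

	isolate_range(base, size, &start_rgn, &end_rgn);
	for (int i = end_rgn - 1; i >= start_rgn; i--) {  /* drop isolated parts */
		memmove(&regions[i], &regions[i + 1],
			(cnt - (i + 1)) * sizeof(regions[0]));
		cnt--;
	}
}

int main(void)
{
	insert_at(0, 0x1000, 0x8000);	/* one big region [0x1000, 0x9000) */
	remove_range(0x2000, 0x3000);	/* punch a hole  [0x2000, 0x5000) */
	for (int i = 0; i < cnt; i++)
		printf("[%#lx-%#lx)\n", regions[i].base,
		       regions[i].base + regions[i].size);
	return 0;
}
```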
463 | long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) | 494 | int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) |
464 | { | 495 | { |
465 | return __memblock_remove(&memblock.memory, base, size); | 496 | return __memblock_remove(&memblock.memory, base, size); |
466 | } | 497 | } |
467 | 498 | ||
468 | long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) | 499 | int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) |
469 | { | 500 | { |
501 | memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n", | ||
502 | (unsigned long long)base, | ||
503 | (unsigned long long)base + size, | ||
504 | (void *)_RET_IP_); | ||
505 | |||
470 | return __memblock_remove(&memblock.reserved, base, size); | 506 | return __memblock_remove(&memblock.reserved, base, size); |
471 | } | 507 | } |
472 | 508 | ||
473 | long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) | 509 | int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) |
474 | { | 510 | { |
475 | struct memblock_type *_rgn = &memblock.reserved; | 511 | struct memblock_type *_rgn = &memblock.reserved; |
476 | 512 | ||
513 | memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n", | ||
514 | (unsigned long long)base, | ||
515 | (unsigned long long)base + size, | ||
516 | (void *)_RET_IP_); | ||
477 | BUG_ON(0 == size); | 517 | BUG_ON(0 == size); |
478 | 518 | ||
479 | return memblock_add_region(_rgn, base, size); | 519 | return memblock_add_region(_rgn, base, size, MAX_NUMNODES); |
480 | } | 520 | } |
481 | 521 | ||
482 | phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) | 522 | /** |
523 | * __next_free_mem_range - next function for for_each_free_mem_range() | ||
524 | * @idx: pointer to u64 loop variable | ||
525 | * @nid: nid: node selector, %MAX_NUMNODES for all nodes | ||
526 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL | ||
527 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL | ||
528 | * @p_nid: ptr to int for nid of the range, can be %NULL | ||
529 | * | ||
530 | * Find the first free area from *@idx which matches @nid, fill the out | ||
531 | * parameters, and update *@idx for the next iteration. The lower 32bit of | ||
532 | * *@idx contains index into memory region and the upper 32bit indexes the | ||
533 | * areas before each reserved region. For example, if reserved regions | ||
534 | * look like the following, | ||
535 | * | ||
536 | * 0:[0-16), 1:[32-48), 2:[128-130) | ||
537 | * | ||
538 | * The upper 32bit indexes the following regions. | ||
539 | * | ||
540 | * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX) | ||
541 | * | ||
542 | * As both region arrays are sorted, the function advances the two indices | ||
543 | * in lockstep and returns each intersection. | ||
544 | */ | ||
545 | void __init_memblock __next_free_mem_range(u64 *idx, int nid, | ||
546 | phys_addr_t *out_start, | ||
547 | phys_addr_t *out_end, int *out_nid) | ||
483 | { | 548 | { |
484 | phys_addr_t found; | 549 | struct memblock_type *mem = &memblock.memory; |
550 | struct memblock_type *rsv = &memblock.reserved; | ||
551 | int mi = *idx & 0xffffffff; | ||
552 | int ri = *idx >> 32; | ||
485 | 553 | ||
486 | /* We align the size to limit fragmentation. Without this, a lot of | 554 | for ( ; mi < mem->cnt; mi++) { |
487 | * small allocs quickly eat up the whole reserve array on sparc | 555 | struct memblock_region *m = &mem->regions[mi]; |
488 | */ | 556 | phys_addr_t m_start = m->base; |
489 | size = memblock_align_up(size, align); | 557 | phys_addr_t m_end = m->base + m->size; |
490 | 558 | ||
491 | found = memblock_find_base(size, align, 0, max_addr); | 559 | /* only memory regions are associated with nodes, check it */ |
492 | if (found != MEMBLOCK_ERROR && | 560 | if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m)) |
493 | !memblock_add_region(&memblock.reserved, found, size)) | 561 | continue; |
494 | return found; | ||
495 | 562 | ||
496 | return 0; | 563 | /* scan areas before each reservation for intersection */ |
564 | for ( ; ri < rsv->cnt + 1; ri++) { | ||
565 | struct memblock_region *r = &rsv->regions[ri]; | ||
566 | phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0; | ||
567 | phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX; | ||
568 | |||
569 | /* if ri advanced past mi, break out to advance mi */ | ||
570 | if (r_start >= m_end) | ||
571 | break; | ||
572 | /* if the two regions intersect, we're done */ | ||
573 | if (m_start < r_end) { | ||
574 | if (out_start) | ||
575 | *out_start = max(m_start, r_start); | ||
576 | if (out_end) | ||
577 | *out_end = min(m_end, r_end); | ||
578 | if (out_nid) | ||
579 | *out_nid = memblock_get_region_node(m); | ||
580 | /* | ||
581 | * The region which ends first is advanced | ||
582 | * for the next iteration. | ||
583 | */ | ||
584 | if (m_end <= r_end) | ||
585 | mi++; | ||
586 | else | ||
587 | ri++; | ||
588 | *idx = (u32)mi | (u64)ri << 32; | ||
589 | return; | ||
590 | } | ||
591 | } | ||
592 | } | ||
593 | |||
594 | /* signal end of iteration */ | ||
595 | *idx = ULLONG_MAX; | ||
497 | } | 596 | } |
498 | 597 | ||
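__next_free_mem_range() packs two cursors into the single u64 loop variable: the low 32 bits index the memory regions and the high 32 bits index the gaps between reserved regions, and each call returns the next intersection, advancing whichever range ends first. A standalone sketch of the index packing and lockstep walk, with the gaps written out explicitly for the reserved layout from the kernel-doc example above (the memory regions are made-up values); this is an illustration, not the kernel iterator:

```c
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };	/* [start, end), sorted, disjoint */

/* made-up memory regions, plus the gaps left between the reserved regions
 * of the kernel-doc example: 0:[0-16), 1:[32-48), 2:[128-130) */
static const struct range mem[]  = { { 0, 64 }, { 100, 200 } };
static const struct range gaps[] = { { 16, 32 }, { 48, 128 }, { 130, 1 << 20 } };
#define NR_MEM  2
#define NR_GAPS 3

/* return the next free intersection and advance the packed cursor; 0 = done */
static int next_free(uint64_t *idx, uint64_t *out_start, uint64_t *out_end)
{
	uint32_t mi = *idx & 0xffffffff;	/* low 32 bits: memory index */
	uint32_t ri = *idx >> 32;		/* high 32 bits: gap index   */

	for (; mi < NR_MEM; mi++) {
		for (; ri < NR_GAPS; ri++) {
			if (gaps[ri].start >= mem[mi].end)
				break;		/* gap is above: advance memory */
			if (mem[mi].start < gaps[ri].end) {
				*out_start = mem[mi].start > gaps[ri].start ?
					     mem[mi].start : gaps[ri].start;
				*out_end   = mem[mi].end < gaps[ri].end ?
					     mem[mi].end : gaps[ri].end;
				/* advance whichever range ends first */
				if (mem[mi].end <= gaps[ri].end)
					mi++;
				else
					ri++;
				*idx = (uint64_t)mi | (uint64_t)ri << 32;
				return 1;
			}
		}
	}
	return 0;
}

int main(void)
{
	uint64_t idx = 0, s, e;

	while (next_free(&idx, &s, &e))
		printf("free: [%llu-%llu)\n",
		       (unsigned long long)s, (unsigned long long)e);
	return 0;
}
```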
499 | phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) | 598 | /** |
599 | * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse() | ||
600 | * @idx: pointer to u64 loop variable | ||
601 | * @nid: nid: node selector, %MAX_NUMNODES for all nodes | ||
602 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL | ||
603 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL | ||
604 | * @p_nid: ptr to int for nid of the range, can be %NULL | ||
605 | * | ||
606 | * Reverse of __next_free_mem_range(). | ||
607 | */ | ||
608 | void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid, | ||
609 | phys_addr_t *out_start, | ||
610 | phys_addr_t *out_end, int *out_nid) | ||
500 | { | 611 | { |
501 | phys_addr_t alloc; | 612 | struct memblock_type *mem = &memblock.memory; |
613 | struct memblock_type *rsv = &memblock.reserved; | ||
614 | int mi = *idx & 0xffffffff; | ||
615 | int ri = *idx >> 32; | ||
502 | 616 | ||
503 | alloc = __memblock_alloc_base(size, align, max_addr); | 617 | if (*idx == (u64)ULLONG_MAX) { |
618 | mi = mem->cnt - 1; | ||
619 | ri = rsv->cnt; | ||
620 | } | ||
504 | 621 | ||
505 | if (alloc == 0) | 622 | for ( ; mi >= 0; mi--) { |
506 | panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", | 623 | struct memblock_region *m = &mem->regions[mi]; |
507 | (unsigned long long) size, (unsigned long long) max_addr); | 624 | phys_addr_t m_start = m->base; |
625 | phys_addr_t m_end = m->base + m->size; | ||
508 | 626 | ||
509 | return alloc; | 627 | /* only memory regions are associated with nodes, check it */ |
510 | } | 628 | if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m)) |
629 | continue; | ||
511 | 630 | ||
512 | phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align) | 631 | /* scan areas before each reservation for intersection */ |
513 | { | 632 | for ( ; ri >= 0; ri--) { |
514 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); | 633 | struct memblock_region *r = &rsv->regions[ri]; |
515 | } | 634 | phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0; |
635 | phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX; | ||
636 | |||
637 | /* if ri advanced past mi, break out to advance mi */ | ||
638 | if (r_end <= m_start) | ||
639 | break; | ||
640 | /* if the two regions intersect, we're done */ | ||
641 | if (m_end > r_start) { | ||
642 | if (out_start) | ||
643 | *out_start = max(m_start, r_start); | ||
644 | if (out_end) | ||
645 | *out_end = min(m_end, r_end); | ||
646 | if (out_nid) | ||
647 | *out_nid = memblock_get_region_node(m); | ||
648 | |||
649 | if (m_start >= r_start) | ||
650 | mi--; | ||
651 | else | ||
652 | ri--; | ||
653 | *idx = (u32)mi | (u64)ri << 32; | ||
654 | return; | ||
655 | } | ||
656 | } | ||
657 | } | ||
516 | 658 | ||
659 | *idx = ULLONG_MAX; | ||
660 | } | ||
517 | 661 | ||
662 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP | ||
518 | /* | 663 | /* |
519 | * Additional node-local allocators. Search for node memory is bottom up | 664 | * Common iterator interface used to define for_each_mem_range(). |
520 | * and walks memblock regions within that node bottom-up as well, but allocation | ||
521 | * within an memblock region is top-down. XXX I plan to fix that at some stage | ||
522 | * | ||
523 | * WARNING: Only available after early_node_map[] has been populated, | ||
524 | * on some architectures, that is after all the calls to add_active_range() | ||
525 | * have been done to populate it. | ||
526 | */ | 665 | */ |
527 | 666 | void __init_memblock __next_mem_pfn_range(int *idx, int nid, | |
528 | phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid) | 667 | unsigned long *out_start_pfn, |
668 | unsigned long *out_end_pfn, int *out_nid) | ||
529 | { | 669 | { |
530 | #ifdef CONFIG_ARCH_POPULATES_NODE_MAP | 670 | struct memblock_type *type = &memblock.memory; |
531 | /* | 671 | struct memblock_region *r; |
532 | * This code originates from sparc which really wants use to walk by addresses | 672 | |
533 | * and returns the nid. This is not very convenient for early_pfn_map[] users | 673 | while (++*idx < type->cnt) { |
534 | * as the map isn't sorted yet, and it really wants to be walked by nid. | 674 | r = &type->regions[*idx]; |
535 | * | ||
536 | * For now, I implement the inefficient method below which walks the early | ||
537 | * map multiple times. Eventually we may want to use an ARCH config option | ||
538 | * to implement a completely different method for both case. | ||
539 | */ | ||
540 | unsigned long start_pfn, end_pfn; | ||
541 | int i; | ||
542 | 675 | ||
543 | for (i = 0; i < MAX_NUMNODES; i++) { | 676 | if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) |
544 | get_pfn_range_for_nid(i, &start_pfn, &end_pfn); | ||
545 | if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn)) | ||
546 | continue; | 677 | continue; |
547 | *nid = i; | 678 | if (nid == MAX_NUMNODES || nid == r->nid) |
548 | return min(end, PFN_PHYS(end_pfn)); | 679 | break; |
680 | } | ||
681 | if (*idx >= type->cnt) { | ||
682 | *idx = -1; | ||
683 | return; | ||
549 | } | 684 | } |
550 | #endif | ||
551 | *nid = 0; | ||
552 | 685 | ||
553 | return end; | 686 | if (out_start_pfn) |
687 | *out_start_pfn = PFN_UP(r->base); | ||
688 | if (out_end_pfn) | ||
689 | *out_end_pfn = PFN_DOWN(r->base + r->size); | ||
690 | if (out_nid) | ||
691 | *out_nid = r->nid; | ||
554 | } | 692 | } |
555 | 693 | ||
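__next_mem_pfn_range() reports each memory region as a page-frame range, rounding the base up and the end down so that only fully covered pages are included, and skipping regions too small to cover a whole page. A tiny standalone illustration of that rounding, with local PFN_UP()/PFN_DOWN() definitions matching the usual kernel macros:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* round up / down to a page frame number, as PFN_UP()/PFN_DOWN() do */
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	uint64_t base = 0x1800, size = 0x2800;	/* region [0x1800, 0x4000) */

	/* only the fully covered pages are reported: pfns [2, 4) here */
	printf("pfn range [%llu, %llu)\n",
	       (unsigned long long)PFN_UP(base),
	       (unsigned long long)PFN_DOWN(base + size));

	/* a region smaller than a page yields an empty range and is skipped */
	printf("skip if %llu >= %llu\n",
	       (unsigned long long)PFN_UP(0x2100),
	       (unsigned long long)PFN_DOWN(0x2100 + 0x200));
	return 0;
}
```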
556 | static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp, | 694 | /** |
557 | phys_addr_t size, | 695 | * memblock_set_node - set node ID on memblock regions |
558 | phys_addr_t align, int nid) | 696 | * @base: base of area to set node ID for |
697 | * @size: size of area to set node ID for | ||
698 | * @nid: node ID to set | ||
699 | * | ||
700 | * Set the nid of memblock memory regions in [@base,@base+@size) to @nid. | ||
701 | * Regions which cross the area boundaries are split as necessary. | ||
702 | * | ||
703 | * RETURNS: | ||
704 | * 0 on success, -errno on failure. | ||
705 | */ | ||
706 | int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, | ||
707 | int nid) | ||
559 | { | 708 | { |
560 | phys_addr_t start, end; | 709 | struct memblock_type *type = &memblock.memory; |
710 | int start_rgn, end_rgn; | ||
711 | int i, ret; | ||
561 | 712 | ||
562 | start = mp->base; | 713 | ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); |
563 | end = start + mp->size; | 714 | if (ret) |
715 | return ret; | ||
564 | 716 | ||
565 | start = memblock_align_up(start, align); | 717 | for (i = start_rgn; i < end_rgn; i++) |
566 | while (start < end) { | 718 | type->regions[i].nid = nid; |
567 | phys_addr_t this_end; | ||
568 | int this_nid; | ||
569 | 719 | ||
570 | this_end = memblock_nid_range(start, end, &this_nid); | 720 | memblock_merge_regions(type); |
571 | if (this_nid == nid) { | 721 | return 0; |
572 | phys_addr_t ret = memblock_find_region(start, this_end, size, align); | 722 | } |
573 | if (ret != MEMBLOCK_ERROR && | 723 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
574 | !memblock_add_region(&memblock.reserved, ret, size)) | 724 | |
575 | return ret; | 725 | static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size, |
576 | } | 726 | phys_addr_t align, phys_addr_t max_addr, |
577 | start = this_end; | 727 | int nid) |
578 | } | 728 | { |
729 | phys_addr_t found; | ||
730 | |||
731 | found = memblock_find_in_range_node(0, max_addr, size, align, nid); | ||
732 | if (found && !memblock_reserve(found, size)) | ||
733 | return found; | ||
579 | 734 | ||
580 | return MEMBLOCK_ERROR; | 735 | return 0; |
581 | } | 736 | } |
582 | 737 | ||
583 | phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid) | 738 | phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid) |
584 | { | 739 | { |
585 | struct memblock_type *mem = &memblock.memory; | 740 | return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid); |
586 | int i; | 741 | } |
587 | 742 | ||
588 | BUG_ON(0 == size); | 743 | phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) |
744 | { | ||
745 | return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES); | ||
746 | } | ||
589 | 747 | ||
590 | /* We align the size to limit fragmentation. Without this, a lot of | 748 | phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) |
591 | * small allocs quickly eat up the whole reserve array on sparc | 749 | { |
592 | */ | 750 | phys_addr_t alloc; |
593 | size = memblock_align_up(size, align); | ||
594 | 751 | ||
595 | /* We do a bottom-up search for a region with the right | 752 | alloc = __memblock_alloc_base(size, align, max_addr); |
596 | * nid since that's easier considering how memblock_nid_range() | ||
597 | * works | ||
598 | */ | ||
599 | for (i = 0; i < mem->cnt; i++) { | ||
600 | phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i], | ||
601 | size, align, nid); | ||
602 | if (ret != MEMBLOCK_ERROR) | ||
603 | return ret; | ||
604 | } | ||
605 | 753 | ||
606 | return 0; | 754 | if (alloc == 0) |
755 | panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", | ||
756 | (unsigned long long) size, (unsigned long long) max_addr); | ||
757 | |||
758 | return alloc; | ||
759 | } | ||
760 | |||
761 | phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align) | ||
762 | { | ||
763 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); | ||
607 | } | 764 | } |
608 | 765 | ||
609 | phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) | 766 | phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) |
@@ -612,7 +769,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i | |||
612 | 769 | ||
613 | if (res) | 770 | if (res) |
614 | return res; | 771 | return res; |
615 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE); | 772 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); |
616 | } | 773 | } |
617 | 774 | ||
618 | 775 | ||
@@ -620,10 +777,15 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i | |||
620 | * Remaining API functions | 777 | * Remaining API functions |
621 | */ | 778 | */ |
622 | 779 | ||
623 | /* You must call memblock_analyze() before this. */ | ||
624 | phys_addr_t __init memblock_phys_mem_size(void) | 780 | phys_addr_t __init memblock_phys_mem_size(void) |
625 | { | 781 | { |
626 | return memblock.memory_size; | 782 | return memblock.memory.total_size; |
783 | } | ||
784 | |||
785 | /* lowest address */ | ||
786 | phys_addr_t __init_memblock memblock_start_of_DRAM(void) | ||
787 | { | ||
788 | return memblock.memory.regions[0].base; | ||
627 | } | 789 | } |
628 | 790 | ||
629 | phys_addr_t __init_memblock memblock_end_of_DRAM(void) | 791 | phys_addr_t __init_memblock memblock_end_of_DRAM(void) |
@@ -633,45 +795,28 @@ phys_addr_t __init_memblock memblock_end_of_DRAM(void) | |||
633 | return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); | 795 | return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); |
634 | } | 796 | } |
635 | 797 | ||
636 | /* You must call memblock_analyze() after this. */ | 798 | void __init memblock_enforce_memory_limit(phys_addr_t limit) |
637 | void __init memblock_enforce_memory_limit(phys_addr_t memory_limit) | ||
638 | { | 799 | { |
639 | unsigned long i; | 800 | unsigned long i; |
640 | phys_addr_t limit; | 801 | phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX; |
641 | struct memblock_region *p; | ||
642 | 802 | ||
643 | if (!memory_limit) | 803 | if (!limit) |
644 | return; | 804 | return; |
645 | 805 | ||
646 | /* Truncate the memblock regions to satisfy the memory limit. */ | 806 | /* find out max address */ |
647 | limit = memory_limit; | ||
648 | for (i = 0; i < memblock.memory.cnt; i++) { | 807 | for (i = 0; i < memblock.memory.cnt; i++) { |
649 | if (limit > memblock.memory.regions[i].size) { | 808 | struct memblock_region *r = &memblock.memory.regions[i]; |
650 | limit -= memblock.memory.regions[i].size; | ||
651 | continue; | ||
652 | } | ||
653 | |||
654 | memblock.memory.regions[i].size = limit; | ||
655 | memblock.memory.cnt = i + 1; | ||
656 | break; | ||
657 | } | ||
658 | |||
659 | memory_limit = memblock_end_of_DRAM(); | ||
660 | 809 | ||
661 | /* And truncate any reserves above the limit also. */ | 810 | if (limit <= r->size) { |
662 | for (i = 0; i < memblock.reserved.cnt; i++) { | 811 | max_addr = r->base + limit; |
663 | p = &memblock.reserved.regions[i]; | 812 | break; |
664 | |||
665 | if (p->base > memory_limit) | ||
666 | p->size = 0; | ||
667 | else if ((p->base + p->size) > memory_limit) | ||
668 | p->size = memory_limit - p->base; | ||
669 | |||
670 | if (p->size == 0) { | ||
671 | memblock_remove_region(&memblock.reserved, i); | ||
672 | i--; | ||
673 | } | 813 | } |
814 | limit -= r->size; | ||
674 | } | 815 | } |
816 | |||
817 | /* truncate both memory and reserved regions */ | ||
818 | __memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX); | ||
819 | __memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX); | ||
675 | } | 820 | } |
676 | 821 | ||
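The rewritten memblock_enforce_memory_limit() first walks the memory regions, consuming the limit until it finds the physical address where the cumulative size reaches it, then truncates both the memory and reserved types above that address with one __memblock_remove() call each. A small standalone sketch of the max_addr computation, with hypothetical example regions:

```c
#include <stdint.h>
#include <stdio.h>

struct region { uint64_t base, size; };

/* find the physical address at which the cumulative memory size hits @limit */
static uint64_t limit_to_max_addr(const struct region *r, int nr, uint64_t limit)
{
	uint64_t max_addr = UINT64_MAX;

	for (int i = 0; i < nr; i++) {
		if (limit <= r[i].size) {	/* limit reached inside this region */
			max_addr = r[i].base + limit;
			break;
		}
		limit -= r[i].size;		/* whole region fits under the limit */
	}
	return max_addr;
}

int main(void)
{
	/* e.g. 256 MiB at 0 and 512 MiB at 1 GiB, with a 384 MiB limit */
	struct region mem[] = {
		{ 0x00000000, 0x10000000 },
		{ 0x40000000, 0x20000000 },
	};
	uint64_t max_addr = limit_to_max_addr(mem, 2, 0x18000000);

	/* everything at or above max_addr would then be dropped from both the
	 * memory and the reserved type, as the new code does with
	 * __memblock_remove(..., max_addr, ULLONG_MAX) */
	printf("max_addr = %#llx\n", (unsigned long long)max_addr);  /* 0x48000000 */
	return 0;
}
```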
677 | static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) | 822 | static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) |
@@ -705,16 +850,18 @@ int __init_memblock memblock_is_memory(phys_addr_t addr) | |||
705 | int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) | 850 | int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) |
706 | { | 851 | { |
707 | int idx = memblock_search(&memblock.memory, base); | 852 | int idx = memblock_search(&memblock.memory, base); |
853 | phys_addr_t end = base + memblock_cap_size(base, &size); | ||
708 | 854 | ||
709 | if (idx == -1) | 855 | if (idx == -1) |
710 | return 0; | 856 | return 0; |
711 | return memblock.memory.regions[idx].base <= base && | 857 | return memblock.memory.regions[idx].base <= base && |
712 | (memblock.memory.regions[idx].base + | 858 | (memblock.memory.regions[idx].base + |
713 | memblock.memory.regions[idx].size) >= (base + size); | 859 | memblock.memory.regions[idx].size) >= end; |
714 | } | 860 | } |
715 | 861 | ||
716 | int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) | 862 | int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) |
717 | { | 863 | { |
864 | memblock_cap_size(base, &size); | ||
718 | return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; | 865 | return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; |
719 | } | 866 | } |
720 | 867 | ||
@@ -724,86 +871,45 @@ void __init_memblock memblock_set_current_limit(phys_addr_t limit) | |||
724 | memblock.current_limit = limit; | 871 | memblock.current_limit = limit; |
725 | } | 872 | } |
726 | 873 | ||
727 | static void __init_memblock memblock_dump(struct memblock_type *region, char *name) | 874 | static void __init_memblock memblock_dump(struct memblock_type *type, char *name) |
728 | { | 875 | { |
729 | unsigned long long base, size; | 876 | unsigned long long base, size; |
730 | int i; | 877 | int i; |
731 | 878 | ||
732 | pr_info(" %s.cnt = 0x%lx\n", name, region->cnt); | 879 | pr_info(" %s.cnt = 0x%lx\n", name, type->cnt); |
733 | 880 | ||
734 | for (i = 0; i < region->cnt; i++) { | 881 | for (i = 0; i < type->cnt; i++) { |
735 | base = region->regions[i].base; | 882 | struct memblock_region *rgn = &type->regions[i]; |
736 | size = region->regions[i].size; | 883 | char nid_buf[32] = ""; |
737 | 884 | ||
738 | pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n", | 885 | base = rgn->base; |
739 | name, i, base, base + size - 1, size); | 886 | size = rgn->size; |
887 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP | ||
888 | if (memblock_get_region_node(rgn) != MAX_NUMNODES) | ||
889 | snprintf(nid_buf, sizeof(nid_buf), " on node %d", | ||
890 | memblock_get_region_node(rgn)); | ||
891 | #endif | ||
892 | pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n", | ||
893 | name, i, base, base + size - 1, size, nid_buf); | ||
740 | } | 894 | } |
741 | } | 895 | } |
742 | 896 | ||
743 | void __init_memblock memblock_dump_all(void) | 897 | void __init_memblock __memblock_dump_all(void) |
744 | { | 898 | { |
745 | if (!memblock_debug) | ||
746 | return; | ||
747 | |||
748 | pr_info("MEMBLOCK configuration:\n"); | 899 | pr_info("MEMBLOCK configuration:\n"); |
749 | pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size); | 900 | pr_info(" memory size = %#llx reserved size = %#llx\n", |
901 | (unsigned long long)memblock.memory.total_size, | ||
902 | (unsigned long long)memblock.reserved.total_size); | ||
750 | 903 | ||
751 | memblock_dump(&memblock.memory, "memory"); | 904 | memblock_dump(&memblock.memory, "memory"); |
752 | memblock_dump(&memblock.reserved, "reserved"); | 905 | memblock_dump(&memblock.reserved, "reserved"); |
753 | } | 906 | } |
754 | 907 | ||
755 | void __init memblock_analyze(void) | 908 | void __init memblock_allow_resize(void) |
756 | { | 909 | { |
757 | int i; | ||
758 | |||
759 | /* Check marker in the unused last array entry */ | ||
760 | WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base | ||
761 | != MEMBLOCK_INACTIVE); | ||
762 | WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base | ||
763 | != MEMBLOCK_INACTIVE); | ||
764 | |||
765 | memblock.memory_size = 0; | ||
766 | |||
767 | for (i = 0; i < memblock.memory.cnt; i++) | ||
768 | memblock.memory_size += memblock.memory.regions[i].size; | ||
769 | |||
770 | /* We allow resizing from there */ | ||
771 | memblock_can_resize = 1; | 910 | memblock_can_resize = 1; |
772 | } | 911 | } |
773 | 912 | ||
774 | void __init memblock_init(void) | ||
775 | { | ||
776 | static int init_done __initdata = 0; | ||
777 | |||
778 | if (init_done) | ||
779 | return; | ||
780 | init_done = 1; | ||
781 | |||
782 | /* Hookup the initial arrays */ | ||
783 | memblock.memory.regions = memblock_memory_init_regions; | ||
784 | memblock.memory.max = INIT_MEMBLOCK_REGIONS; | ||
785 | memblock.reserved.regions = memblock_reserved_init_regions; | ||
786 | memblock.reserved.max = INIT_MEMBLOCK_REGIONS; | ||
787 | |||
788 | /* Write a marker in the unused last array entry */ | ||
789 | memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE; | ||
790 | memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE; | ||
791 | |||
792 | /* Create a dummy zero size MEMBLOCK which will get coalesced away later. | ||
793 | * This simplifies the memblock_add() code below... | ||
794 | */ | ||
795 | memblock.memory.regions[0].base = 0; | ||
796 | memblock.memory.regions[0].size = 0; | ||
797 | memblock.memory.cnt = 1; | ||
798 | |||
799 | /* Ditto. */ | ||
800 | memblock.reserved.regions[0].base = 0; | ||
801 | memblock.reserved.regions[0].size = 0; | ||
802 | memblock.reserved.cnt = 1; | ||
803 | |||
804 | memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE; | ||
805 | } | ||
806 | |||
807 | static int __init early_memblock(char *p) | 913 | static int __init early_memblock(char *p) |
808 | { | 914 | { |
809 | if (p && strstr(p, "debug")) | 915 | if (p && strstr(p, "debug")) |
@@ -812,7 +918,7 @@ static int __init early_memblock(char *p) | |||
812 | } | 918 | } |
813 | early_param("memblock", early_memblock); | 919 | early_param("memblock", early_memblock); |
814 | 920 | ||
815 | #if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK) | 921 | #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK) |
816 | 922 | ||
817 | static int memblock_debug_show(struct seq_file *m, void *private) | 923 | static int memblock_debug_show(struct seq_file *m, void *private) |
818 | { | 924 | { |