path: root/mm/memblock.c
author		Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit		8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree		a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /mm/memblock.c
parent		406089d01562f1e2bf9f089fd7637009ebaad589 (diff)

Patched in Tegra support.

Diffstat (limited to 'mm/memblock.c')
 -rw-r--r--	mm/memblock.c	| 1160
 1 file changed, 482 insertions, 678 deletions
diff --git a/mm/memblock.c b/mm/memblock.c
index 88adc8afb61..ccbf9733959 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -20,29 +20,15 @@
 #include <linux/seq_file.h>
 #include <linux/memblock.h>
 
-static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
-static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
-
-struct memblock memblock __initdata_memblock = {
-	.memory.regions		= memblock_memory_init_regions,
-	.memory.cnt		= 1,	/* empty dummy entry */
-	.memory.max		= INIT_MEMBLOCK_REGIONS,
-
-	.reserved.regions	= memblock_reserved_init_regions,
-	.reserved.cnt		= 1,	/* empty dummy entry */
-	.reserved.max		= INIT_MEMBLOCK_REGIONS,
-
-	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
-};
+struct memblock memblock __initdata_memblock;
 
 int memblock_debug __initdata_memblock;
-static int memblock_can_resize __initdata_memblock;
-static int memblock_memory_in_slab __initdata_memblock = 0;
-static int memblock_reserved_in_slab __initdata_memblock = 0;
+int memblock_can_resize __initdata_memblock;
+static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
+static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
 
 /* inline so we don't get a warning when pr_debug is compiled out */
-static __init_memblock const char *
-memblock_type_name(struct memblock_type *type)
+static inline const char *memblock_type_name(struct memblock_type *type)
 {
 	if (type == &memblock.memory)
 		return "memory";
@@ -52,23 +38,27 @@ memblock_type_name(struct memblock_type *type)
 	return "unknown";
 }
 
-/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
-static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
-{
-	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
-}
-
 /*
  * Address comparison utilities
  */
+
+static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size)
+{
+	return addr & ~(size - 1);
+}
+
+static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size)
+{
+	return (addr + (size - 1)) & ~(size - 1);
+}
+
 static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
 					phys_addr_t base2, phys_addr_t size2)
 {
 	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
 }
 
-static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
-					phys_addr_t base, phys_addr_t size)
+long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 {
 	unsigned long i;
 
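The two alignment helpers being added assume a power-of-two alignment: masking with ~(size - 1) clears the low bits, and adding size - 1 first rounds up instead of down. A standalone demonstration of the same bit trick:

	#include <assert.h>
	#include <stdint.h>

	typedef uint64_t phys_addr_t;

	/* Valid only when align is a power of two. */
	static phys_addr_t align_down(phys_addr_t addr, phys_addr_t align) { return addr & ~(align - 1); }
	static phys_addr_t align_up(phys_addr_t addr, phys_addr_t align)   { return (addr + (align - 1)) & ~(align - 1); }

	int main(void)
	{
		assert(align_down(0x12345, 0x1000) == 0x12000);
		assert(align_up(0x12345, 0x1000) == 0x13000);
		assert(align_up(0x12000, 0x1000) == 0x12000);	/* already aligned */
		return 0;
	}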
@@ -82,121 +72,135 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
 	return (i < type->cnt) ? i : -1;
 }
 
-/**
- * memblock_find_in_range_node - find free area in given range and node
- * @start: start of candidate range
- * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
- * @size: size of free area to find
- * @align: alignment of free area to find
- * @nid: nid of the free area to find, %MAX_NUMNODES for any node
- *
- * Find @size free area aligned to @align in the specified range and node.
- *
- * RETURNS:
- * Found address on success, %0 on failure.
+/*
+ * Find, allocate, deallocate or reserve unreserved regions. All allocations
+ * are top-down.
  */
-phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
-					phys_addr_t end, phys_addr_t size,
-					phys_addr_t align, int nid)
+
+static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
+					phys_addr_t size, phys_addr_t align)
+{
+	phys_addr_t base, res_base;
+	long j;
+
+	/* In case, huge size is requested */
+	if (end < size)
+		return MEMBLOCK_ERROR;
+
+	base = memblock_align_down((end - size), align);
+
+	/* Prevent allocations returning 0 as it's also used to
+	 * indicate an allocation failure
+	 */
+	if (start == 0)
+		start = PAGE_SIZE;
+
+	while (start <= base) {
+		j = memblock_overlaps_region(&memblock.reserved, base, size);
+		if (j < 0)
+			return base;
+		res_base = memblock.reserved.regions[j].base;
+		if (res_base < size)
+			break;
+		base = memblock_align_down(res_base - size, align);
+	}
+
+	return MEMBLOCK_ERROR;
+}
+
+static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
+			phys_addr_t align, phys_addr_t start, phys_addr_t end)
 {
-	phys_addr_t this_start, this_end, cand;
-	u64 i;
+	long i;
+
+	BUG_ON(0 == size);
 
-	/* pump up @end */
+	/* Pump up max_addr */
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
 		end = memblock.current_limit;
 
-	/* avoid allocating the first page */
-	start = max_t(phys_addr_t, start, PAGE_SIZE);
-	end = max(start, end);
-
-	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
-		this_start = clamp(this_start, start, end);
-		this_end = clamp(this_end, start, end);
+	/* We do a top-down search, this tends to limit memory
+	 * fragmentation by keeping early boot allocs near the
+	 * top of memory
+	 */
+	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
+		phys_addr_t memblockbase = memblock.memory.regions[i].base;
+		phys_addr_t memblocksize = memblock.memory.regions[i].size;
+		phys_addr_t bottom, top, found;
 
-		if (this_end < size)
+		if (memblocksize < size)
 			continue;
-
-		cand = round_down(this_end - size, align);
-		if (cand >= this_start)
-			return cand;
+		if ((memblockbase + memblocksize) <= start)
+			break;
+		bottom = max(memblockbase, start);
+		top = min(memblockbase + memblocksize, end);
+		if (bottom >= top)
+			continue;
+		found = memblock_find_region(bottom, top, size, align);
+		if (found != MEMBLOCK_ERROR)
+			return found;
 	}
-	return 0;
+	return MEMBLOCK_ERROR;
 }
 
-/**
- * memblock_find_in_range - find free area in given range
- * @start: start of candidate range
- * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
- * @size: size of free area to find
- * @align: alignment of free area to find
- *
- * Find @size free area aligned to @align in the specified range.
- *
- * RETURNS:
- * Found address on success, %0 on failure.
+/*
+ * Find a free area with specified alignment in a specific range.
  */
-phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
-					phys_addr_t end, phys_addr_t size,
-					phys_addr_t align)
+u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align)
 {
-	return memblock_find_in_range_node(start, end, size, align,
-					   MAX_NUMNODES);
+	return memblock_find_base(size, align, start, end);
+}
+
+/*
+ * Free memblock.reserved.regions
+ */
+int __init_memblock memblock_free_reserved_regions(void)
+{
+	if (memblock.reserved.regions == memblock_reserved_init_regions)
+		return 0;
+
+	return memblock_free(__pa(memblock.reserved.regions),
+		 sizeof(struct memblock_region) * memblock.reserved.max);
+}
+
+/*
+ * Reserve memblock.reserved.regions
+ */
+int __init_memblock memblock_reserve_reserved_regions(void)
+{
+	if (memblock.reserved.regions == memblock_reserved_init_regions)
+		return 0;
+
+	return memblock_reserve(__pa(memblock.reserved.regions),
+		 sizeof(struct memblock_region) * memblock.reserved.max);
 }
 
 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
-	type->total_size -= type->regions[r].size;
-	memmove(&type->regions[r], &type->regions[r + 1],
-		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
+	unsigned long i;
+
+	for (i = r; i < type->cnt - 1; i++) {
+		type->regions[i].base = type->regions[i + 1].base;
+		type->regions[i].size = type->regions[i + 1].size;
+	}
 	type->cnt--;
 
 	/* Special case for empty arrays */
 	if (type->cnt == 0) {
-		WARN_ON(type->total_size != 0);
 		type->cnt = 1;
 		type->regions[0].base = 0;
 		type->regions[0].size = 0;
-		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
 	}
 }
 
-phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
-					phys_addr_t *addr)
-{
-	if (memblock.reserved.regions == memblock_reserved_init_regions)
-		return 0;
-
-	*addr = __pa(memblock.reserved.regions);
-
-	return PAGE_ALIGN(sizeof(struct memblock_region) *
-			  memblock.reserved.max);
-}
+/* Defined below but needed now */
+static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);
 
-/**
- * memblock_double_array - double the size of the memblock regions array
- * @type: memblock type of the regions array being doubled
- * @new_area_start: starting address of memory range to avoid overlap with
- * @new_area_size: size of memory range to avoid overlap with
- *
- * Double the size of the @type regions array. If memblock is being used to
- * allocate memory for a new reserved regions array and there is a previously
- * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
- * waiting to be reserved, ensure the memory used by the new array does
- * not overlap.
- *
- * RETURNS:
- * 0 on success, -1 on failure.
- */
-static int __init_memblock memblock_double_array(struct memblock_type *type,
-						phys_addr_t new_area_start,
-						phys_addr_t new_area_size)
+static int __init_memblock memblock_double_array(struct memblock_type *type)
 {
 	struct memblock_region *new_array, *old_array;
-	phys_addr_t old_alloc_size, new_alloc_size;
 	phys_addr_t old_size, new_size, addr;
 	int use_slab = slab_is_available();
-	int *in_slab;
 
 	/* We don't allow resizing until we know about the reserved regions
 	 * of memory that aren't suitable for allocation
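The memblock_find_base()/memblock_find_region() pair being patched in implements a top-down first fit: start at the highest aligned candidate in the window and hop below each reserved block that collides. A compressed userspace sketch of the same search, under the assumptions that end >= size, align is a power of two, and the reserved table is sorted (hypothetical types, not the kernel's):

	#include <stdint.h>

	typedef uint64_t phys_t;
	#define FIND_ERR ((phys_t)-1)

	struct range { phys_t base, size; };

	/* Index of the first reservation overlapping [base, base+size), or -1. */
	static long overlaps(const struct range *rsv, int cnt, phys_t base, phys_t size)
	{
		for (int i = 0; i < cnt; i++)
			if (base < rsv[i].base + rsv[i].size && rsv[i].base < base + size)
				return i;
		return -1;
	}

	static phys_t find_top_down(const struct range *rsv, int cnt,
				    phys_t start, phys_t end, phys_t size, phys_t align)
	{
		phys_t base = (end - size) & ~(align - 1);	/* highest aligned candidate */

		while (start <= base) {
			long j = overlaps(rsv, cnt, base, size);
			if (j < 0)
				return base;			/* found a hole */
			if (rsv[j].base < size)
				break;				/* no room below this reservation */
			base = (rsv[j].base - size) & ~(align - 1);
		}
		return FIND_ERR;
	}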
@@ -207,62 +211,36 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 	/* Calculate new doubled size */
 	old_size = type->max * sizeof(struct memblock_region);
 	new_size = old_size << 1;
-	/*
-	 * We need to allocated new one align to PAGE_SIZE,
-	 * so we can free them completely later.
-	 */
-	old_alloc_size = PAGE_ALIGN(old_size);
-	new_alloc_size = PAGE_ALIGN(new_size);
-
-	/* Retrieve the slab flag */
-	if (type == &memblock.memory)
-		in_slab = &memblock_memory_in_slab;
-	else
-		in_slab = &memblock_reserved_in_slab;
 
 	/* Try to find some space for it.
 	 *
 	 * WARNING: We assume that either slab_is_available() and we use it or
-	 * we use MEMBLOCK for allocations. That means that this is unsafe to
-	 * use when bootmem is currently active (unless bootmem itself is
-	 * implemented on top of MEMBLOCK which isn't the case yet)
+	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
+	 * when bootmem is currently active (unless bootmem itself is implemented
+	 * on top of MEMBLOCK which isn't the case yet)
 	 *
 	 * This should however not be an issue for now, as we currently only
-	 * call into MEMBLOCK while it's still active, or much later when slab
-	 * is active for memory hotplug operations
+	 * call into MEMBLOCK while it's still active, or much later when slab is
+	 * active for memory hotplug operations
 	 */
 	if (use_slab) {
 		new_array = kmalloc(new_size, GFP_KERNEL);
-		addr = new_array ? __pa(new_array) : 0;
-	} else {
-		/* only exclude range when trying to double reserved.regions */
-		if (type != &memblock.reserved)
-			new_area_start = new_area_size = 0;
-
-		addr = memblock_find_in_range(new_area_start + new_area_size,
-						memblock.current_limit,
-						new_alloc_size, PAGE_SIZE);
-		if (!addr && new_area_size)
-			addr = memblock_find_in_range(0,
-				min(new_area_start, memblock.current_limit),
-				new_alloc_size, PAGE_SIZE);
-
-		new_array = addr ? __va(addr) : NULL;
-	}
-	if (!addr) {
+		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
+	} else
+		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
+	if (addr == MEMBLOCK_ERROR) {
 		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
 		       memblock_type_name(type), type->max, type->max * 2);
 		return -1;
 	}
+	new_array = __va(addr);
 
-	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
-			memblock_type_name(type), type->max * 2, (u64)addr,
-			(u64)addr + new_size - 1);
+	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
+		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);
 
-	/*
-	 * Found space, we now need to move the array over before we add the
-	 * reserved region since it may be our reserved array itself that is
-	 * full.
+	/* Found space, we now need to move the array over before
+	 * we add the reserved region since it may be our reserved
+	 * array itself that is full.
 	 */
 	memcpy(new_array, type->regions, old_size);
 	memset(new_array + type->max, 0, old_size);
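Both versions of memblock_double_array() share one subtlety: the new array must be populated before the old one is released, because the region table can describe its own backing store. A simplified userspace sketch of that grow-by-doubling step, with malloc()/free() standing in for kmalloc()/memblock allocation and the static-array special case omitted:

	#include <stdlib.h>
	#include <string.h>

	struct region { unsigned long base, size; };
	struct type { struct region *regions; unsigned long cnt, max; };

	/* Double the backing array; 0 on success, -1 on failure. */
	static int double_array(struct type *type)
	{
		size_t old_size = type->max * sizeof(struct region);
		struct region *new_array = malloc(old_size * 2);

		if (!new_array)
			return -1;
		/* Copy first, free last: the old array may still be in use
		 * (and, in the kernel, may itself be a tracked region). */
		memcpy(new_array, type->regions, old_size);
		memset((char *)new_array + old_size, 0, old_size);
		free(type->regions);
		type->regions = new_array;
		type->max *= 2;
		return 0;
	}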
@@ -270,543 +248,362 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 	type->regions = new_array;
 	type->max <<= 1;
 
-	/* Free old array. We needn't free it if the array is the static one */
-	if (*in_slab)
-		kfree(old_array);
-	else if (old_array != memblock_memory_init_regions &&
-		 old_array != memblock_reserved_init_regions)
-		memblock_free(__pa(old_array), old_alloc_size);
+	/* If we use SLAB that's it, we are done */
+	if (use_slab)
+		return 0;
 
-	/*
-	 * Reserve the new array if that comes from the memblock.  Otherwise, we
-	 * needn't do it
-	 */
-	if (!use_slab)
-		BUG_ON(memblock_reserve(addr, new_alloc_size));
+	/* Add the new reserved region now. Should not fail ! */
+	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));
 
-	/* Update slab flag */
-	*in_slab = use_slab;
+	/* If the array wasn't our static init one, then free it. We only do
+	 * that before SLAB is available as later on, we don't know whether
+	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
+	 * anyways
+	 */
+	if (old_array != memblock_memory_init_regions &&
+	    old_array != memblock_reserved_init_regions)
+		memblock_free(__pa(old_array), old_size);
 
 	return 0;
 }
 
-/**
- * memblock_merge_regions - merge neighboring compatible regions
- * @type: memblock type to scan
- *
- * Scan @type and merge neighboring compatible regions.
- */
-static void __init_memblock memblock_merge_regions(struct memblock_type *type)
+extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
+					  phys_addr_t addr2, phys_addr_t size2)
 {
-	int i = 0;
-
-	/* cnt never goes below 1 */
-	while (i < type->cnt - 1) {
-		struct memblock_region *this = &type->regions[i];
-		struct memblock_region *next = &type->regions[i + 1];
-
-		if (this->base + this->size != next->base ||
-		    memblock_get_region_node(this) !=
-		    memblock_get_region_node(next)) {
-			BUG_ON(this->base + this->size > next->base);
-			i++;
-			continue;
-		}
-
-		this->size += next->size;
-		/* move forward from next + 1, index of which is i + 2 */
-		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
-		type->cnt--;
-	}
+	return 1;
 }
 
-/**
- * memblock_insert_region - insert new memblock region
- * @type: memblock type to insert into
- * @idx: index for the insertion point
- * @base: base address of the new region
- * @size: size of the new region
- *
- * Insert new memblock region [@base,@base+@size) into @type at @idx.
- * @type must already have extra room to accomodate the new region.
- */
-static void __init_memblock memblock_insert_region(struct memblock_type *type,
-						   int idx, phys_addr_t base,
-						   phys_addr_t size, int nid)
+static long __init_memblock memblock_add_region(struct memblock_type *type,
+						phys_addr_t base, phys_addr_t size)
 {
-	struct memblock_region *rgn = &type->regions[idx];
+	phys_addr_t end = base + size;
+	int i, slot = -1;
 
-	BUG_ON(type->cnt >= type->max);
-	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
-	rgn->base = base;
-	rgn->size = size;
-	memblock_set_region_node(rgn, nid);
-	type->cnt++;
-	type->total_size += size;
-}
+	/* First try and coalesce this MEMBLOCK with others */
+	for (i = 0; i < type->cnt; i++) {
+		struct memblock_region *rgn = &type->regions[i];
+		phys_addr_t rend = rgn->base + rgn->size;
 
-/**
- * memblock_add_region - add new memblock region
- * @type: memblock type to add new region into
- * @base: base address of the new region
- * @size: size of the new region
- * @nid: nid of the new region
- *
- * Add new memblock region [@base,@base+@size) into @type.  The new region
- * is allowed to overlap with existing ones - overlaps don't affect already
- * existing regions.  @type is guaranteed to be minimal (all neighbouring
- * compatible regions are merged) after the addition.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-static int __init_memblock memblock_add_region(struct memblock_type *type,
-				phys_addr_t base, phys_addr_t size, int nid)
-{
-	bool insert = false;
-	phys_addr_t obase = base;
-	phys_addr_t end = base + memblock_cap_size(base, &size);
-	int i, nr_new;
+		/* Exit if there's no possible hits */
+		if (rgn->base > end || rgn->size == 0)
+			break;
 
-	if (!size)
-		return 0;
+		/* Check if we are fully enclosed within an existing
+		 * block
+		 */
+		if (rgn->base <= base && rend >= end)
+			return 0;
+
+		/* Check if we overlap or are adjacent with the bottom
+		 * of a block.
+		 */
+		if (base < rgn->base && end >= rgn->base) {
+			/* If we can't coalesce, create a new block */
+			if (!memblock_memory_can_coalesce(base, size,
+							  rgn->base,
+							  rgn->size)) {
+				/* Overlap & can't coalesce are mutually
+				 * exclusive, if you do that, be prepared
+				 * for trouble
+				 */
+				WARN_ON(end != rgn->base);
+				goto new_block;
+			}
+			/* We extend the bottom of the block down to our
+			 * base
+			 */
+			rgn->base = base;
+			rgn->size = rend - base;
+
+			/* Return if we have nothing else to allocate
+			 * (fully coalesced)
+			 */
+			if (rend >= end)
+				return 0;
+
+			/* We continue processing from the end of the
+			 * coalesced block.
+			 */
+			base = rend;
+			size = end - base;
+		}
+
+		/* Now check if we overlap or are adjacent with the
+		 * top of a block
+		 */
+		if (base <= rend && end >= rend) {
+			/* If we can't coalesce, create a new block */
+			if (!memblock_memory_can_coalesce(rgn->base,
+							  rgn->size,
+							  base, size)) {
+				/* Overlap & can't coalesce are mutually
+				 * exclusive, if you do that, be prepared
+				 * for trouble
+				 */
+				WARN_ON(rend != base);
+				goto new_block;
+			}
+			/* We adjust our base down to enclose the
+			 * original block and destroy it. It will be
+			 * part of our new allocation. Since we've
+			 * freed an entry, we know we won't fail
+			 * to allocate one later, so we won't risk
+			 * losing the original block allocation.
+			 */
+			size += (base - rgn->base);
+			base = rgn->base;
+			memblock_remove_region(type, i--);
+		}
+	}
 
-	/* special case for empty array */
-	if (type->regions[0].size == 0) {
-		WARN_ON(type->cnt != 1 || type->total_size);
+	/* If the array is empty, special case, replace the fake
+	 * filler region and return
+	 */
+	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
 		type->regions[0].base = base;
 		type->regions[0].size = size;
-		memblock_set_region_node(&type->regions[0], nid);
-		type->total_size = size;
 		return 0;
 	}
-repeat:
-	/*
-	 * The following is executed twice.  Once with %false @insert and
-	 * then with %true.  The first counts the number of regions needed
-	 * to accomodate the new area.  The second actually inserts them.
-	 */
-	base = obase;
-	nr_new = 0;
 
-	for (i = 0; i < type->cnt; i++) {
-		struct memblock_region *rgn = &type->regions[i];
-		phys_addr_t rbase = rgn->base;
-		phys_addr_t rend = rbase + rgn->size;
+ new_block:
+	/* If we are out of space, we fail. It's too late to resize the array
+	 * but then this shouldn't have happened in the first place.
+	 */
+	if (WARN_ON(type->cnt >= type->max))
+		return -1;
 
-		if (rbase >= end)
+	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
+	for (i = type->cnt - 1; i >= 0; i--) {
+		if (base < type->regions[i].base) {
+			type->regions[i+1].base = type->regions[i].base;
+			type->regions[i+1].size = type->regions[i].size;
+		} else {
+			type->regions[i+1].base = base;
+			type->regions[i+1].size = size;
+			slot = i + 1;
 			break;
-		if (rend <= base)
-			continue;
-		/*
-		 * @rgn overlaps.  If it separates the lower part of new
-		 * area, insert that portion.
-		 */
-		if (rbase > base) {
-			nr_new++;
-			if (insert)
-				memblock_insert_region(type, i++, base,
-						       rbase - base, nid);
 		}
-		/* area below @rend is dealt with, forget about it */
-		base = min(rend, end);
 	}
-
-	/* insert the remaining portion */
-	if (base < end) {
-		nr_new++;
-		if (insert)
-			memblock_insert_region(type, i, base, end - base, nid);
+	if (base < type->regions[0].base) {
+		type->regions[0].base = base;
+		type->regions[0].size = size;
+		slot = 0;
 	}
+	type->cnt++;
 
-	/*
-	 * If this was the first round, resize array and repeat for actual
-	 * insertions; otherwise, merge and return.
+	/* The array is full ? Try to resize it. If that fails, we undo
+	 * our allocation and return an error
 	 */
-	if (!insert) {
-		while (type->cnt + nr_new > type->max)
-			if (memblock_double_array(type, obase, size) < 0)
-				return -ENOMEM;
-		insert = true;
-		goto repeat;
-	} else {
-		memblock_merge_regions(type);
-		return 0;
+	if (type->cnt == type->max && memblock_double_array(type)) {
+		BUG_ON(slot < 0);
+		memblock_remove_region(type, slot);
+		return -1;
 	}
-}
 
-int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
-				       int nid)
-{
-	return memblock_add_region(&memblock.memory, base, size, nid);
+	return 0;
 }
 
-int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
+long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
 {
-	return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
+	return memblock_add_region(&memblock.memory, base, size);
+
 }
 
-/**
- * memblock_isolate_range - isolate given range into disjoint memblocks
- * @type: memblock type to isolate range for
- * @base: base of range to isolate
- * @size: size of range to isolate
- * @start_rgn: out parameter for the start of isolated region
- * @end_rgn: out parameter for the end of isolated region
- *
- * Walk @type and ensure that regions don't cross the boundaries defined by
- * [@base,@base+@size).  Crossing regions are split at the boundaries,
- * which may create at most two more regions.  The index of the first
- * region inside the range is returned in *@start_rgn and end in *@end_rgn.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-static int __init_memblock memblock_isolate_range(struct memblock_type *type,
-					phys_addr_t base, phys_addr_t size,
-					int *start_rgn, int *end_rgn)
+static long __init_memblock __memblock_remove(struct memblock_type *type,
+					      phys_addr_t base, phys_addr_t size)
 {
-	phys_addr_t end = base + memblock_cap_size(base, &size);
+	phys_addr_t end = base + size;
 	int i;
 
-	*start_rgn = *end_rgn = 0;
-
-	if (!size)
-		return 0;
-
-	/* we'll create at most two more regions */
-	while (type->cnt + 2 > type->max)
-		if (memblock_double_array(type, base, size) < 0)
-			return -ENOMEM;
-
+	/* Walk through the array for collisions */
 	for (i = 0; i < type->cnt; i++) {
 		struct memblock_region *rgn = &type->regions[i];
-		phys_addr_t rbase = rgn->base;
-		phys_addr_t rend = rbase + rgn->size;
+		phys_addr_t rend = rgn->base + rgn->size;
 
-		if (rbase >= end)
+		/* Nothing more to do, exit */
+		if (rgn->base > end || rgn->size == 0)
 			break;
-		if (rend <= base)
+
+		/* If we fully enclose the block, drop it */
+		if (base <= rgn->base && end >= rend) {
+			memblock_remove_region(type, i--);
 			continue;
+		}
 
-		if (rbase < base) {
-			/*
-			 * @rgn intersects from below.  Split and continue
-			 * to process the next region - the new top half.
-			 */
-			rgn->base = base;
-			rgn->size -= base - rbase;
-			type->total_size -= base - rbase;
-			memblock_insert_region(type, i, rbase, base - rbase,
-					       memblock_get_region_node(rgn));
-		} else if (rend > end) {
-			/*
-			 * @rgn intersects from above.  Split and redo the
-			 * current region - the new bottom half.
+		/* If we are fully enclosed within a block
+		 * then we need to split it and we are done
+		 */
+		if (base > rgn->base && end < rend) {
+			rgn->size = base - rgn->base;
+			if (!memblock_add_region(type, end, rend - end))
+				return 0;
+			/* Failure to split is bad, we at least
+			 * restore the block before erroring
 			 */
-			rgn->base = end;
-			rgn->size -= end - rbase;
-			type->total_size -= end - rbase;
-			memblock_insert_region(type, i--, rbase, end - rbase,
-					       memblock_get_region_node(rgn));
-		} else {
-			/* @rgn is fully contained, record it */
-			if (!*end_rgn)
-				*start_rgn = i;
-			*end_rgn = i + 1;
+			rgn->size = rend - rgn->base;
+			WARN_ON(1);
+			return -1;
 		}
-	}
 
-	return 0;
-}
-
-static int __init_memblock __memblock_remove(struct memblock_type *type,
-					     phys_addr_t base, phys_addr_t size)
-{
-	int start_rgn, end_rgn;
-	int i, ret;
+		/* Check if we need to trim the bottom of a block */
+		if (rgn->base < end && rend > end) {
+			rgn->size -= end - rgn->base;
+			rgn->base = end;
+			break;
+		}
 
-	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
-	if (ret)
-		return ret;
+		/* And check if we need to trim the top of a block */
+		if (base < rend)
+			rgn->size -= rend - base;
 
-	for (i = end_rgn - 1; i >= start_rgn; i--)
-		memblock_remove_region(type, i);
+	}
 	return 0;
 }
 
-int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
+long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
 {
 	return __memblock_remove(&memblock.memory, base, size);
 }
 
-int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
+long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
 {
-	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
-		     (unsigned long long)base,
-		     (unsigned long long)base + size,
-		     (void *)_RET_IP_);
-
 	return __memblock_remove(&memblock.reserved, base, size);
 }
 
-int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
+long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
 {
 	struct memblock_type *_rgn = &memblock.reserved;
 
-	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
-		     (unsigned long long)base,
-		     (unsigned long long)base + size,
-		     (void *)_RET_IP_);
+	BUG_ON(0 == size);
 
-	return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
+	return memblock_add_region(_rgn, base, size);
 }
 
-/**
- * __next_free_mem_range - next function for for_each_free_mem_range()
- * @idx: pointer to u64 loop variable
- * @nid: nid: node selector, %MAX_NUMNODES for all nodes
- * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
- * @out_nid: ptr to int for nid of the range, can be %NULL
- *
- * Find the first free area from *@idx which matches @nid, fill the out
- * parameters, and update *@idx for the next iteration.  The lower 32bit of
- * *@idx contains index into memory region and the upper 32bit indexes the
- * areas before each reserved region.  For example, if reserved regions
- * look like the following,
- *
- *	0:[0-16), 1:[32-48), 2:[128-130)
- *
- * The upper 32bit indexes the following regions.
- *
- *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
- *
- * As both region arrays are sorted, the function advances the two indices
- * in lockstep and returns each intersection.
- */
-void __init_memblock __next_free_mem_range(u64 *idx, int nid,
-					   phys_addr_t *out_start,
-					   phys_addr_t *out_end, int *out_nid)
+phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
-	struct memblock_type *mem = &memblock.memory;
-	struct memblock_type *rsv = &memblock.reserved;
-	int mi = *idx & 0xffffffff;
-	int ri = *idx >> 32;
-
-	for ( ; mi < mem->cnt; mi++) {
-		struct memblock_region *m = &mem->regions[mi];
-		phys_addr_t m_start = m->base;
-		phys_addr_t m_end = m->base + m->size;
+	phys_addr_t found;
 
-		/* only memory regions are associated with nodes, check it */
-		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
-			continue;
+	/* We align the size to limit fragmentation. Without this, a lot of
+	 * small allocs quickly eat up the whole reserve array on sparc
+	 */
+	size = memblock_align_up(size, align);
 
-		/* scan areas before each reservation for intersection */
-		for ( ; ri < rsv->cnt + 1; ri++) {
-			struct memblock_region *r = &rsv->regions[ri];
-			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
-			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
-
-			/* if ri advanced past mi, break out to advance mi */
-			if (r_start >= m_end)
-				break;
-			/* if the two regions intersect, we're done */
-			if (m_start < r_end) {
-				if (out_start)
-					*out_start = max(m_start, r_start);
-				if (out_end)
-					*out_end = min(m_end, r_end);
-				if (out_nid)
-					*out_nid = memblock_get_region_node(m);
-				/*
-				 * The region which ends first is advanced
-				 * for the next iteration.
-				 */
-				if (m_end <= r_end)
-					mi++;
-				else
-					ri++;
-				*idx = (u32)mi | (u64)ri << 32;
-				return;
-			}
-		}
-	}
+	found = memblock_find_base(size, align, 0, max_addr);
+	if (found != MEMBLOCK_ERROR &&
+	    !memblock_add_region(&memblock.reserved, found, size))
+		return found;
 
-	/* signal end of iteration */
-	*idx = ULLONG_MAX;
+	return 0;
 }
 
-/**
- * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
- * @idx: pointer to u64 loop variable
- * @nid: nid: node selector, %MAX_NUMNODES for all nodes
- * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
- * @out_nid: ptr to int for nid of the range, can be %NULL
- *
- * Reverse of __next_free_mem_range().
- */
-void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
-					   phys_addr_t *out_start,
-					   phys_addr_t *out_end, int *out_nid)
+phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
-	struct memblock_type *mem = &memblock.memory;
-	struct memblock_type *rsv = &memblock.reserved;
-	int mi = *idx & 0xffffffff;
-	int ri = *idx >> 32;
-
-	if (*idx == (u64)ULLONG_MAX) {
-		mi = mem->cnt - 1;
-		ri = rsv->cnt;
-	}
+	phys_addr_t alloc;
 
-	for ( ; mi >= 0; mi--) {
-		struct memblock_region *m = &mem->regions[mi];
-		phys_addr_t m_start = m->base;
-		phys_addr_t m_end = m->base + m->size;
+	alloc = __memblock_alloc_base(size, align, max_addr);
 
-		/* only memory regions are associated with nodes, check it */
-		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
-			continue;
+	if (alloc == 0)
+		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+		      (unsigned long long) size, (unsigned long long) max_addr);
 
-		/* scan areas before each reservation for intersection */
-		for ( ; ri >= 0; ri--) {
-			struct memblock_region *r = &rsv->regions[ri];
-			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
-			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
-
-			/* if ri advanced past mi, break out to advance mi */
-			if (r_end <= m_start)
-				break;
-			/* if the two regions intersect, we're done */
-			if (m_end > r_start) {
-				if (out_start)
-					*out_start = max(m_start, r_start);
-				if (out_end)
-					*out_end = min(m_end, r_end);
-				if (out_nid)
-					*out_nid = memblock_get_region_node(m);
-
-				if (m_start >= r_start)
-					mi--;
-				else
-					ri--;
-				*idx = (u32)mi | (u64)ri << 32;
-				return;
-			}
-		}
-	}
+	return alloc;
+}
 
-	*idx = ULLONG_MAX;
+phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
+{
+	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
 
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+
 /*
- * Common iterator interface used to define for_each_mem_range().
+ * Additional node-local allocators. Search for node memory is bottom up
+ * and walks memblock regions within that node bottom-up as well, but allocation
+ * within an memblock region is top-down. XXX I plan to fix that at some stage
+ *
+ * WARNING: Only available after early_node_map[] has been populated,
+ * on some architectures, that is after all the calls to add_active_range()
+ * have been done to populate it.
  */
-void __init_memblock __next_mem_pfn_range(int *idx, int nid,
-				unsigned long *out_start_pfn,
-				unsigned long *out_end_pfn, int *out_nid)
-{
-	struct memblock_type *type = &memblock.memory;
-	struct memblock_region *r;
 
-	while (++*idx < type->cnt) {
-		r = &type->regions[*idx];
+phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
+{
+#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+	/*
+	 * This code originates from sparc which really wants use to walk by addresses
+	 * and returns the nid. This is not very convenient for early_pfn_map[] users
+	 * as the map isn't sorted yet, and it really wants to be walked by nid.
+	 *
+	 * For now, I implement the inefficient method below which walks the early
+	 * map multiple times. Eventually we may want to use an ARCH config option
+	 * to implement a completely different method for both case.
+	 */
+	unsigned long start_pfn, end_pfn;
+	int i;
 
-		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
+		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
 			continue;
-		if (nid == MAX_NUMNODES || nid == r->nid)
-			break;
-	}
-	if (*idx >= type->cnt) {
-		*idx = -1;
-		return;
+		*nid = i;
+		return min(end, PFN_PHYS(end_pfn));
 	}
+#endif
+	*nid = 0;
 
-	if (out_start_pfn)
-		*out_start_pfn = PFN_UP(r->base);
-	if (out_end_pfn)
-		*out_end_pfn = PFN_DOWN(r->base + r->size);
-	if (out_nid)
-		*out_nid = r->nid;
+	return end;
 }
 
-/**
- * memblock_set_node - set node ID on memblock regions
- * @base: base of area to set node ID for
- * @size: size of area to set node ID for
- * @nid: node ID to set
- *
- * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
- * Regions which cross the area boundaries are split as necessary.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
-				      int nid)
+static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
+					       phys_addr_t size,
+					       phys_addr_t align, int nid)
 {
-	struct memblock_type *type = &memblock.memory;
-	int start_rgn, end_rgn;
-	int i, ret;
-
-	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
-	if (ret)
-		return ret;
-
-	for (i = start_rgn; i < end_rgn; i++)
-		memblock_set_region_node(&type->regions[i], nid);
+	phys_addr_t start, end;
 
-	memblock_merge_regions(type);
-	return 0;
-}
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-
-static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
-					phys_addr_t align, phys_addr_t max_addr,
-					int nid)
-{
-	phys_addr_t found;
+	start = mp->base;
+	end = start + mp->size;
 
-	/* align @size to avoid excessive fragmentation on reserved array */
-	size = round_up(size, align);
+	start = memblock_align_up(start, align);
+	while (start < end) {
+		phys_addr_t this_end;
+		int this_nid;
 
-	found = memblock_find_in_range_node(0, max_addr, size, align, nid);
-	if (found && !memblock_reserve(found, size))
-		return found;
+		this_end = memblock_nid_range(start, end, &this_nid);
+		if (this_nid == nid) {
+			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
+			if (ret != MEMBLOCK_ERROR &&
+			    !memblock_add_region(&memblock.reserved, ret, size))
+				return ret;
+		}
+		start = this_end;
+	}
 
-	return 0;
+	return MEMBLOCK_ERROR;
 }
 
 phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
 {
-	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
-}
+	struct memblock_type *mem = &memblock.memory;
+	int i;
 
-phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
-	return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
-}
+	BUG_ON(0 == size);
 
-phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
-	phys_addr_t alloc;
+	/* We align the size to limit fragmentation. Without this, a lot of
+	 * small allocs quickly eat up the whole reserve array on sparc
+	 */
+	size = memblock_align_up(size, align);
 
-	alloc = __memblock_alloc_base(size, align, max_addr);
+	/* We do a bottom-up search for a region with the right
+	 * nid since that's easier considering how memblock_nid_range()
+	 * works
+	 */
+	for (i = 0; i < mem->cnt; i++) {
+		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
+					       size, align, nid);
+		if (ret != MEMBLOCK_ERROR)
+			return ret;
+	}
 
-	if (alloc == 0)
-		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
-		      (unsigned long long) size, (unsigned long long) max_addr);
-
-	return alloc;
-}
-
-phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
-{
-	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+	return 0;
 }
 
 phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
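The memblock_add_region() restored above keeps the table sorted by base and tries to coalesce with neighbours before falling back to a sorted insert. A reduced sketch of the same merge-or-insert idea over half-open ranges (illustrative only; assumes the table has spare capacity):

	#include <string.h>

	struct range { unsigned long start, end; };	/* half-open [start, end) */

	/* Insert [start, end) into a sorted, non-overlapping table,
	 * merging with every range it touches. Returns the new count. */
	static int range_add(struct range *t, int cnt, unsigned long start, unsigned long end)
	{
		int i = 0, j;

		while (i < cnt && t[i].end < start)		/* skip ranges wholly below */
			i++;
		for (j = i; j < cnt && t[j].start <= end; j++) {	/* absorb overlaps */
			if (t[j].start < start) start = t[j].start;
			if (t[j].end > end)     end = t[j].end;
		}
		memmove(&t[i + 1], &t[j], (cnt - j) * sizeof(*t));	/* close the gap */
		t[i].start = start;
		t[i].end = end;
		return cnt - (j - i) + 1;
	}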
@@ -815,7 +612,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
 
 	if (res)
 		return res;
-	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
 }
 
 
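The one-line change above only widens the fallback window (ANYWHERE instead of ACCESSIBLE) when the node-local attempt fails; the surrounding function is a plain try-then-fallback. Its shape, with hypothetical stubbed helpers so the sketch compiles standalone:

	/* Illustrative only: not the kernel's functions. */
	static unsigned long alloc_on_nid(unsigned long size, unsigned long align, int nid)
	{ (void)size; (void)align; (void)nid; return 0; /* pretend the node is full */ }
	static unsigned long alloc_anywhere(unsigned long size, unsigned long align)
	{ (void)align; return size ? 0x100000UL : 0; /* pretend a global hit */ }

	static unsigned long alloc_try_nid(unsigned long size, unsigned long align, int nid)
	{
		unsigned long res = alloc_on_nid(size, align, nid);

		return res ? res : alloc_anywhere(size, align);
	}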
@@ -823,15 +620,10 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
  * Remaining API functions
  */
 
+/* You must call memblock_analyze() before this. */
 phys_addr_t __init memblock_phys_mem_size(void)
 {
-	return memblock.memory.total_size;
-}
-
-/* lowest address */
-phys_addr_t __init_memblock memblock_start_of_DRAM(void)
-{
-	return memblock.memory.regions[0].base;
+	return memblock.memory_size;
 }
 
 phys_addr_t __init_memblock memblock_end_of_DRAM(void)
@@ -841,28 +633,45 @@ phys_addr_t __init_memblock memblock_end_of_DRAM(void)
 	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
 }
 
-void __init memblock_enforce_memory_limit(phys_addr_t limit)
+/* You must call memblock_analyze() after this. */
+void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
 {
 	unsigned long i;
-	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
+	phys_addr_t limit;
+	struct memblock_region *p;
 
-	if (!limit)
+	if (!memory_limit)
 		return;
 
-	/* find out max address */
+	/* Truncate the memblock regions to satisfy the memory limit. */
+	limit = memory_limit;
 	for (i = 0; i < memblock.memory.cnt; i++) {
-		struct memblock_region *r = &memblock.memory.regions[i];
-
-		if (limit <= r->size) {
-			max_addr = r->base + limit;
-			break;
+		if (limit > memblock.memory.regions[i].size) {
+			limit -= memblock.memory.regions[i].size;
+			continue;
 		}
-		limit -= r->size;
+
+		memblock.memory.regions[i].size = limit;
+		memblock.memory.cnt = i + 1;
+		break;
 	}
 
-	/* truncate both memory and reserved regions */
-	__memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
-	__memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
+	memory_limit = memblock_end_of_DRAM();
+
+	/* And truncate any reserves above the limit also. */
+	for (i = 0; i < memblock.reserved.cnt; i++) {
+		p = &memblock.reserved.regions[i];
+
+		if (p->base > memory_limit)
+			p->size = 0;
+		else if ((p->base + p->size) > memory_limit)
+			p->size = memory_limit - p->base;
+
+		if (p->size == 0) {
+			memblock_remove_region(&memblock.reserved, i);
+			i--;
+		}
+	}
 }
 
 static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
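The first loop above spends the limit across regions in address order and clips the region that exhausts it, cutting the table there. The same logic in isolation (hypothetical standalone types):

	struct reg { unsigned long long base, size; };

	/* Returns the new region count after enforcing a byte budget. */
	static unsigned long clamp_to_limit(struct reg *r, unsigned long cnt,
					    unsigned long long limit)
	{
		unsigned long i;

		for (i = 0; i < cnt; i++) {
			if (limit > r[i].size) {	/* region fits: spend it */
				limit -= r[i].size;
				continue;
			}
			r[i].size = limit;		/* partial fit: clip ... */
			return i + 1;			/* ... and drop the rest */
		}
		return cnt;
	}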
@@ -893,113 +702,108 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
 	return memblock_search(&memblock.memory, addr) != -1;
 }
 
-/**
- * memblock_is_region_memory - check if a region is a subset of memory
- * @base: base of region to check
- * @size: size of region to check
- *
- * Check if the region [@base, @base+@size) is a subset of a memory block.
- *
- * RETURNS:
- * 0 if false, non-zero if true
- */
 int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
 {
 	int idx = memblock_search(&memblock.memory, base);
-	phys_addr_t end = base + memblock_cap_size(base, &size);
 
 	if (idx == -1)
 		return 0;
 	return memblock.memory.regions[idx].base <= base &&
 		(memblock.memory.regions[idx].base +
-		 memblock.memory.regions[idx].size) >= end;
+		 memblock.memory.regions[idx].size) >= (base + size);
 }
 
-/**
- * memblock_is_region_reserved - check if a region intersects reserved memory
- * @base: base of region to check
- * @size: size of region to check
- *
- * Check if the region [@base, @base+@size) intersects a reserved memory block.
- *
- * RETURNS:
- * 0 if false, non-zero if true
- */
 int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
 {
-	memblock_cap_size(base, &size);
 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
-void __init_memblock memblock_trim_memory(phys_addr_t align)
-{
-	int i;
-	phys_addr_t start, end, orig_start, orig_end;
-	struct memblock_type *mem = &memblock.memory;
-
-	for (i = 0; i < mem->cnt; i++) {
-		orig_start = mem->regions[i].base;
-		orig_end = mem->regions[i].base + mem->regions[i].size;
-		start = round_up(orig_start, align);
-		end = round_down(orig_end, align);
-
-		if (start == orig_start && end == orig_end)
-			continue;
-
-		if (start < end) {
-			mem->regions[i].base = start;
-			mem->regions[i].size = end - start;
-		} else {
-			memblock_remove_region(mem, i);
-			i--;
-		}
-	}
-}
 
 void __init_memblock memblock_set_current_limit(phys_addr_t limit)
 {
 	memblock.current_limit = limit;
 }
 
-static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
+static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
 {
 	unsigned long long base, size;
 	int i;
 
-	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);
+	pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);
 
-	for (i = 0; i < type->cnt; i++) {
-		struct memblock_region *rgn = &type->regions[i];
-		char nid_buf[32] = "";
+	for (i = 0; i < region->cnt; i++) {
+		base = region->regions[i].base;
+		size = region->regions[i].size;
 
-		base = rgn->base;
-		size = rgn->size;
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
-			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
-				 memblock_get_region_node(rgn));
-#endif
-		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
-			name, i, base, base + size - 1, size, nid_buf);
+		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
+		    name, i, base, base + size - 1, size);
 	}
 }
 
-void __init_memblock __memblock_dump_all(void)
+void __init_memblock memblock_dump_all(void)
 {
+	if (!memblock_debug)
+		return;
+
 	pr_info("MEMBLOCK configuration:\n");
-	pr_info(" memory size = %#llx reserved size = %#llx\n",
-		(unsigned long long)memblock.memory.total_size,
-		(unsigned long long)memblock.reserved.total_size);
+	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);
 
 	memblock_dump(&memblock.memory, "memory");
 	memblock_dump(&memblock.reserved, "reserved");
 }
 
-void __init memblock_allow_resize(void)
+void __init memblock_analyze(void)
 {
+	int i;
+
+	/* Check marker in the unused last array entry */
+	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
+		!= MEMBLOCK_INACTIVE);
+	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
+		!= MEMBLOCK_INACTIVE);
+
+	memblock.memory_size = 0;
+
+	for (i = 0; i < memblock.memory.cnt; i++)
+		memblock.memory_size += memblock.memory.regions[i].size;
+
+	/* We allow resizing from there */
 	memblock_can_resize = 1;
 }
 
+void __init memblock_init(void)
+{
+	static int init_done __initdata = 0;
+
+	if (init_done)
+		return;
+	init_done = 1;
+
+	/* Hookup the initial arrays */
+	memblock.memory.regions = memblock_memory_init_regions;
+	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
+	memblock.reserved.regions = memblock_reserved_init_regions;
+	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;
+
+	/* Write a marker in the unused last array entry */
+	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;
+	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;
+
+	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
+	 * This simplifies the memblock_add() code below...
+	 */
+	memblock.memory.regions[0].base = 0;
+	memblock.memory.regions[0].size = 0;
+	memblock.memory.cnt = 1;
+
+	/* Ditto. */
+	memblock.reserved.regions[0].base = 0;
+	memblock.reserved.regions[0].size = 0;
+	memblock.reserved.cnt = 1;
+
+	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
+}
+
 static int __init early_memblock(char *p)
 {
 	if (p && strstr(p, "debug"))
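The functions restored in this hunk imply the old-style boot sequence. As a usage sketch (call order only, not literal code from this tree):

	memblock_init();                      /* hook up static arrays, dummy entries */
	memblock_add(base, size);             /* register RAM banks */
	memblock_reserve(kbase, ksize);       /* carve out kernel image, initrd, ... */
	memblock_analyze();                   /* compute memblock.memory_size, allow resizing */
	addr = memblock_alloc(sz, align);     /* top-down early allocations */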
@@ -1008,7 +812,7 @@ static int __init early_memblock(char *p)
 }
 early_param("memblock", early_memblock);
 
-#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
+#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)
 
 static int memblock_debug_show(struct seq_file *m, void *private)
 {