author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-07-06 18:39:13 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-08-04 22:56:20 -0400
commit    142b45a72e221537c1bb1995497fef7cdc439e26 (patch)
tree      478f95f05f923e63a845d6f8272647cf40765c4c /mm/memblock.c
parent    6ed311b282210d23d1a2cb2665aa899979993628 (diff)
memblock: Add array resizing support
When one of the arrays gets full, we resize it. After much thinking and
a few iterations of that code, I went back to on-demand resizing using
the (new) internal memblock_find_base() function, which is pretty much
what Yinghai initially proposed, though there are some differences in
the details. For this to work, it relies on the default alloc limit
being set sensibly by the architecture.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
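Note: the resize path allocates through memblock itself before slab is up,
so it is only safe below the limit the architecture has declared usable.
A minimal sketch of how an architecture might establish that limit, using
the memblock_set_current_limit() helper introduced earlier in this series
(the function name example_arch_setup and the lowmem_top bound are
illustrative, not part of this patch):

	/* Illustrative sketch: cap memblock allocations to memory the
	 * architecture has already mapped, so that the array resizing
	 * added by this patch can safely write to whatever address
	 * memblock_find_base() hands back.
	 */
	void __init example_arch_setup(phys_addr_t lowmem_top)
	{
		/* lowmem_top: hypothetical highest directly-usable address */
		memblock_set_current_limit(lowmem_top);
	}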
Diffstat (limited to 'mm/memblock.c')
-rw-r--r--  mm/memblock.c  104
1 file changed, 102 insertions(+), 2 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index e5f3f9bdc31..0787790b1ce 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -11,6 +11,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/bitops.h>
 #include <linux/poison.h>
@@ -18,12 +19,23 @@
 
 struct memblock memblock;
 
-static int memblock_debug;
+static int memblock_debug, memblock_can_resize;
 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1];
 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1];
 
 #define MEMBLOCK_ERROR	(~(phys_addr_t)0)
 
+/* inline so we don't get a warning when pr_debug is compiled out */
+static inline const char *memblock_type_name(struct memblock_type *type)
+{
+	if (type == &memblock.memory)
+		return "memory";
+	else if (type == &memblock.reserved)
+		return "reserved";
+	else
+		return "unknown";
+}
+
 /*
  * Address comparison utilities
  */
@@ -156,6 +168,79 @@ static void memblock_coalesce_regions(struct memblock_type *type,
 	memblock_remove_region(type, r2);
 }
 
+/* Defined below but needed now */
+static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);
+
+static int memblock_double_array(struct memblock_type *type)
+{
+	struct memblock_region *new_array, *old_array;
+	phys_addr_t old_size, new_size, addr;
+	int use_slab = slab_is_available();
+
+	/* We don't allow resizing until we know about the reserved regions
+	 * of memory that aren't suitable for allocation
+	 */
+	if (!memblock_can_resize)
+		return -1;
+
+	pr_debug("memblock: %s array full, doubling...", memblock_type_name(type));
+
+	/* Calculate new doubled size */
+	old_size = type->max * sizeof(struct memblock_region);
+	new_size = old_size << 1;
+
+	/* Try to find some space for it.
+	 *
+	 * WARNING: We assume that either slab_is_available() and we use it or
+	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
+	 * when bootmem is currently active (unless bootmem itself is implemented
+	 * on top of MEMBLOCK which isn't the case yet)
+	 *
+	 * This should however not be an issue for now, as we currently only
+	 * call into MEMBLOCK while it's still active, or much later when slab is
+	 * active for memory hotplug operations
+	 */
+	if (use_slab) {
+		new_array = kmalloc(new_size, GFP_KERNEL);
+		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
+	} else
+		addr = memblock_find_base(new_size, sizeof(phys_addr_t), MEMBLOCK_ALLOC_ACCESSIBLE);
+	if (addr == MEMBLOCK_ERROR) {
+		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
+		       memblock_type_name(type), type->max, type->max * 2);
+		return -1;
+	}
+	new_array = __va(addr);
+
+	/* Found space, we now need to move the array over before
+	 * we add the reserved region since it may be our reserved
+	 * array itself that is full.
+	 */
+	memcpy(new_array, type->regions, old_size);
+	memset(new_array + type->max, 0, old_size);
+	old_array = type->regions;
+	type->regions = new_array;
+	type->max <<= 1;
+
+	/* If we use SLAB that's it, we are done */
+	if (use_slab)
+		return 0;
+
+	/* Add the new reserved region now. Should not fail ! */
+	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);
+
+	/* If the array wasn't our static init one, then free it. We only do
+	 * that before SLAB is available as later on, we don't know whether
+	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
+	 * anyways
+	 */
+	if (old_array != memblock_memory_init_regions &&
+	    old_array != memblock_reserved_init_regions)
+		memblock_free(__pa(old_array), old_size);
+
+	return 0;
+}
+
 static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 {
 	unsigned long coalesced = 0;
@@ -196,7 +281,11 @@ static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 
 	if (coalesced)
 		return coalesced;
-	if (type->cnt >= type->max)
+
+	/* If we are out of space, we fail. It's too late to resize the array
+	 * but then this shouldn't have happened in the first place.
+	 */
+	if (WARN_ON(type->cnt >= type->max))
 		return -1;
 
 	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
@@ -217,6 +306,14 @@ static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 	}
 	type->cnt++;
 
+	/* The array is full ? Try to resize it. If that fails, we undo
+	 * our allocation and return an error
+	 */
+	if (type->cnt == type->max && memblock_double_array(type)) {
+		type->cnt--;
+		return -1;
+	}
+
 	return 0;
 }
 
@@ -541,6 +638,9 @@ void __init memblock_analyze(void)
 
 	for (i = 0; i < memblock.memory.cnt; i++)
 		memblock.memory_size += memblock.memory.regions[i].size;
+
+	/* We allow resizing from there */
+	memblock_can_resize = 1;
 }
 
 void __init memblock_init(void)
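
Note on the ordering in memblock_double_array() above: the old entries are
copied into the new array before the region backing it is reserved, because
the array being doubled may be the reserved array itself, which has no free
slot left at that point. A standalone userspace model of the doubling-on-add
scheme (illustrative C only; names like add_region are hypothetical, and the
kernel's phys/virt translation, static init arrays, and slab/memblock split
are elided):

	/* Userspace model of on-demand doubling (not kernel code). */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct region { unsigned long base, size; };

	struct type {
		struct region *regions;
		unsigned long cnt, max;
	};

	static int double_array(struct type *t)
	{
		unsigned long old_size = t->max * sizeof(struct region);
		struct region *new_array = malloc(old_size * 2);

		if (!new_array)
			return -1;
		/* Copy before publishing, mirroring the kernel's ordering:
		 * once t->regions and t->max are updated, the type can
		 * absorb one more entry even if it is the reserved array
		 * that just filled up. */
		memcpy(new_array, t->regions, old_size);
		memset((char *)new_array + old_size, 0, old_size);
		free(t->regions);	/* kernel frees via memblock_free/kfree */
		t->regions = new_array;
		t->max *= 2;
		return 0;
	}

	static int add_region(struct type *t, unsigned long base, unsigned long size)
	{
		t->regions[t->cnt].base = base;
		t->regions[t->cnt].size = size;
		t->cnt++;
		/* Resize when the last slot is consumed, as in
		 * memblock_add_region(); on failure, undo the insertion. */
		if (t->cnt == t->max && double_array(t)) {
			t->cnt--;
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		struct type t = { .cnt = 0, .max = 2 };
		unsigned long i;

		t.regions = calloc(t.max, sizeof(struct region));
		if (!t.regions)
			return 1;
		for (i = 0; i < 100; i++)
			if (add_region(&t, i * 0x1000, 0x1000))
				return 1;
		printf("cnt=%lu max=%lu\n", t.cnt, t.max); /* cnt=100 max=128 */
		free(t.regions);
		return 0;
	}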