author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-07-12 00:36:48 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-08-04 22:56:19 -0400
commit	6ed311b282210d23d1a2cb2665aa899979993628
tree	a1e77961b0a067af26d0bbae52e64e258bbc3136 /mm/memblock.c
parent	7f219c736f9439acb1c50d264fbee93c353773ca
memblock: Move functions around into a more sensible order
Some shuffling is needed for doing array resize so we may as well put some
sense into the ordering of the functions in the whole memblock.c file. No code
change. Added some comments.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
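For reference while reading the reordered file: the chunk now grouped under
"Find, allocate, deallocate or reserve unreserved regions" is a top-down
first-fit search. Each candidate base is aligned down from the top of a memory
bank, and on collision with a reserved region the search retries just below
that region. A minimal standalone sketch of that walk, with an invented
reserved list standing in for memblock.reserved (illustrative only, not kernel
code):

	/*
	 * Standalone sketch of the memblock_find_region() walk; "reserved"
	 * holds made-up values and "align" must be a power of two, as in
	 * memblock proper.
	 */
	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t phys_addr_t;
	#define MEMBLOCK_ERROR	(~(phys_addr_t)0)

	struct region { phys_addr_t base, size; };

	/* two reserved blocks near the top of a 1GB bank (invented values) */
	static struct region reserved[] = {
		{ 0x3ff00000, 0x00100000 },
		{ 0x3fe00000, 0x00080000 },
	};
	#define NR_RESERVED	(sizeof(reserved) / sizeof(reserved[0]))

	static phys_addr_t align_down(phys_addr_t addr, phys_addr_t size)
	{
		return addr & ~(size - 1);
	}

	static long overlaps_region(phys_addr_t base, phys_addr_t size)
	{
		unsigned long i;

		for (i = 0; i < NR_RESERVED; i++)
			if (base < reserved[i].base + reserved[i].size &&
			    reserved[i].base < base + size)
				return i;
		return -1;
	}

	static phys_addr_t find_region(phys_addr_t start, phys_addr_t end,
				       phys_addr_t size, phys_addr_t align)
	{
		phys_addr_t base = align_down(end - size, align);

		while (start <= base) {
			long j = overlaps_region(base, size);

			if (j < 0)
				return base;	/* found a free hole */
			if (reserved[j].base < size)
				break;		/* no room below this one */
			/* retry immediately below the colliding reservation */
			base = align_down(reserved[j].base - size, align);
		}
		return MEMBLOCK_ERROR;
	}

	int main(void)
	{
		/* 1MB, 1MB-aligned, from a 0..1GB bank: the search steps
		 * below both reservations and lands at 0x3fd00000 */
		phys_addr_t base = find_region(0, 0x40000000, 0x100000, 0x100000);

		printf("allocated at 0x%llx\n", (unsigned long long)base);
		return 0;
	}

As the in-code comment in the diff below notes, searching top-down tends to
limit fragmentation by keeping early boot allocations near the top of memory.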
Diffstat (limited to 'mm/memblock.c')
-rw-r--r--	mm/memblock.c	301
1 file changed, 159 insertions(+), 142 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index b775fca4fba5..e5f3f9bdc311 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -24,40 +24,18 @@ static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIO
 
 #define MEMBLOCK_ERROR	(~(phys_addr_t)0)
 
-static int __init early_memblock(char *p)
-{
-	if (p && strstr(p, "debug"))
-		memblock_debug = 1;
-	return 0;
-}
-early_param("memblock", early_memblock);
-
-static void memblock_dump(struct memblock_type *region, char *name)
-{
-	unsigned long long base, size;
-	int i;
-
-	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
-
-	for (i = 0; i < region->cnt; i++) {
-		base = region->regions[i].base;
-		size = region->regions[i].size;
-
-		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
-		    name, i, base, base + size - 1, size);
-	}
-}
-
-void memblock_dump_all(void)
-{
-	if (!memblock_debug)
-		return;
-
-	pr_info("MEMBLOCK configuration:\n");
-	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);
-
-	memblock_dump(&memblock.memory, "memory");
-	memblock_dump(&memblock.reserved, "reserved");
-}
+/*
+ * Address comparison utilities
+ */
+
+static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
+{
+	return addr & ~(size - 1);
+}
+
+static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
+{
+	return (addr + (size - 1)) & ~(size - 1);
+}
 
 static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
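Both helpers assume size is a power of two: memblock_align_down() masks off
the low bits (e.g. 0x12345 & ~0xfff = 0x12000), and memblock_align_up() rounds
to the next boundary ((0x12345 + 0xfff) & ~0xfff = 0x13000).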
@@ -88,6 +66,77 @@ static long memblock_regions_adjacent(struct memblock_type *type,
 	return memblock_addrs_adjacent(base1, size1, base2, size2);
 }
 
+long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
+{
+	unsigned long i;
+
+	for (i = 0; i < type->cnt; i++) {
+		phys_addr_t rgnbase = type->regions[i].base;
+		phys_addr_t rgnsize = type->regions[i].size;
+		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
+			break;
+	}
+
+	return (i < type->cnt) ? i : -1;
+}
+
+/*
+ * Find, allocate, deallocate or reserve unreserved regions. All allocations
+ * are top-down.
+ */
+
+static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
+					       phys_addr_t size, phys_addr_t align)
+{
+	phys_addr_t base, res_base;
+	long j;
+
+	base = memblock_align_down((end - size), align);
+	while (start <= base) {
+		j = memblock_overlaps_region(&memblock.reserved, base, size);
+		if (j < 0)
+			return base;
+		res_base = memblock.reserved.regions[j].base;
+		if (res_base < size)
+			break;
+		base = memblock_align_down(res_base - size, align);
+	}
+
+	return MEMBLOCK_ERROR;
+}
+
+static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+	long i;
+	phys_addr_t base = 0;
+	phys_addr_t res_base;
+
+	BUG_ON(0 == size);
+
+	size = memblock_align_up(size, align);
+
+	/* Pump up max_addr */
+	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
+		max_addr = memblock.current_limit;
+
+	/* We do a top-down search, this tends to limit memory
+	 * fragmentation by keeping early boot allocs near the
+	 * top of memory
+	 */
+	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
+		phys_addr_t memblockbase = memblock.memory.regions[i].base;
+		phys_addr_t memblocksize = memblock.memory.regions[i].size;
+
+		if (memblocksize < size)
+			continue;
+		base = min(memblockbase + memblocksize, max_addr);
+		res_base = memblock_find_region(memblockbase, base, size, align);
+		if (res_base != MEMBLOCK_ERROR)
+			return res_base;
+	}
+	return MEMBLOCK_ERROR;
+}
+
 static void memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
 	unsigned long i;
@@ -107,22 +156,6 @@ static void memblock_coalesce_regions(struct memblock_type *type,
 	memblock_remove_region(type, r2);
 }
 
-void __init memblock_analyze(void)
-{
-	int i;
-
-	/* Check marker in the unused last array entry */
-	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
-		!= (phys_addr_t)RED_INACTIVE);
-	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
-		!= (phys_addr_t)RED_INACTIVE);
-
-	memblock.memory_size = 0;
-
-	for (i = 0; i < memblock.memory.cnt; i++)
-		memblock.memory_size += memblock.memory.regions[i].size;
-}
-
 static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 {
 	unsigned long coalesced = 0;
@@ -260,49 +293,47 @@ long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
 	return memblock_add_region(_rgn, base, size);
 }
 
-long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
-{
-	unsigned long i;
-
-	for (i = 0; i < type->cnt; i++) {
-		phys_addr_t rgnbase = type->regions[i].base;
-		phys_addr_t rgnsize = type->regions[i].size;
-		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
-			break;
-	}
-
-	return (i < type->cnt) ? i : -1;
-}
-
-static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
-{
-	return addr & ~(size - 1);
-}
-
-static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
-{
-	return (addr + (size - 1)) & ~(size - 1);
-}
-
-static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
-					       phys_addr_t size, phys_addr_t align)
-{
-	phys_addr_t base, res_base;
-	long j;
-
-	base = memblock_align_down((end - size), align);
-	while (start <= base) {
-		j = memblock_overlaps_region(&memblock.reserved, base, size);
-		if (j < 0)
-			return base;
-		res_base = memblock.reserved.regions[j].base;
-		if (res_base < size)
-			break;
-		base = memblock_align_down(res_base - size, align);
-	}
+phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+	phys_addr_t found;
+
+	/* We align the size to limit fragmentation. Without this, a lot of
+	 * small allocs quickly eat up the whole reserve array on sparc
+	 */
+	size = memblock_align_up(size, align);
+
+	found = memblock_find_base(size, align, max_addr);
+	if (found != MEMBLOCK_ERROR &&
+	    memblock_add_region(&memblock.reserved, found, size) >= 0)
+		return found;
+
+	return 0;
+}
+
+phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+	phys_addr_t alloc;
+
+	alloc = __memblock_alloc_base(size, align, max_addr);
+
+	if (alloc == 0)
+		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+		      (unsigned long long) size, (unsigned long long) max_addr);
+
+	return alloc;
+}
+
+phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
+{
+	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+}
+
 
-	return MEMBLOCK_ERROR;
-}
+/*
+ * Additional node-local allocators. Search for node memory is bottom up
+ * and walks memblock regions within that node bottom-up as well, but allocation
+ * within an memblock region is top-down.
+ */
 
 phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
 {
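The three allocators in their new position form a small failure ladder:
__memblock_alloc_base() returns 0 when nothing fits, memblock_alloc_base()
turns that 0 into a panic(), and memblock_alloc() is the panicking variant
bounded by memblock.current_limit. A hypothetical caller that wants a fallback
rather than a panic would stick to the double-underscore form, along these
lines (sketch only; the 4GB limit is an invented example):

	phys_addr_t base;

	/* try below 4GB first, then anywhere the kernel can access */
	base = __memblock_alloc_base(size, align, 0x100000000ULL);
	if (!base)
		base = __memblock_alloc_base(size, align,
					     MEMBLOCK_ALLOC_ACCESSIBLE);
	if (!base)
		return -ENOMEM;	/* handle the failure instead of panicking */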
@@ -364,72 +395,6 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int n
 	return memblock_alloc(size, align);
 }
 
-phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
-{
-	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
-}
-
-static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
-	long i;
-	phys_addr_t base = 0;
-	phys_addr_t res_base;
-
-	BUG_ON(0 == size);
-
-	/* Pump up max_addr */
-	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
-		max_addr = memblock.current_limit;
-
-	/* We do a top-down search, this tends to limit memory
-	 * fragmentation by keeping early boot allocs near the
-	 * top of memory
-	 */
-	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
-		phys_addr_t memblockbase = memblock.memory.regions[i].base;
-		phys_addr_t memblocksize = memblock.memory.regions[i].size;
-
-		if (memblocksize < size)
-			continue;
-		base = min(memblockbase + memblocksize, max_addr);
-		res_base = memblock_find_region(memblockbase, base, size, align);
-		if (res_base != MEMBLOCK_ERROR)
-			return res_base;
-	}
-	return MEMBLOCK_ERROR;
-}
-
-phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
-	phys_addr_t found;
-
-	/* We align the size to limit fragmentation. Without this, a lot of
-	 * small allocs quickly eat up the whole reserve array on sparc
-	 */
-	size = memblock_align_up(size, align);
-
-	found = memblock_find_base(size, align, max_addr);
-	if (found != MEMBLOCK_ERROR &&
-	    memblock_add_region(&memblock.reserved, found, size) >= 0)
-		return found;
-
-	return 0;
-}
-
-phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
-	phys_addr_t alloc;
-
-	alloc = __memblock_alloc_base(size, align, max_addr);
-
-	if (alloc == 0)
-		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
-		      (unsigned long long) size, (unsigned long long) max_addr);
-
-	return alloc;
-}
-
-
 /* You must call memblock_analyze() before this. */
 phys_addr_t __init memblock_phys_mem_size(void)
 {
@@ -534,6 +499,50 @@ void __init memblock_set_current_limit(phys_addr_t limit)
 	memblock.current_limit = limit;
 }
 
+static void memblock_dump(struct memblock_type *region, char *name)
+{
+	unsigned long long base, size;
+	int i;
+
+	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
+
+	for (i = 0; i < region->cnt; i++) {
+		base = region->regions[i].base;
+		size = region->regions[i].size;
+
+		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
+		    name, i, base, base + size - 1, size);
+	}
+}
+
+void memblock_dump_all(void)
+{
+	if (!memblock_debug)
+		return;
+
+	pr_info("MEMBLOCK configuration:\n");
+	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);
+
+	memblock_dump(&memblock.memory, "memory");
+	memblock_dump(&memblock.reserved, "reserved");
+}
+
+void __init memblock_analyze(void)
+{
+	int i;
+
+	/* Check marker in the unused last array entry */
+	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
+		!= (phys_addr_t)RED_INACTIVE);
+	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
+		!= (phys_addr_t)RED_INACTIVE);
+
+	memblock.memory_size = 0;
+
+	for (i = 0; i < memblock.memory.cnt; i++)
+		memblock.memory_size += memblock.memory.regions[i].size;
+}
+
 void __init memblock_init(void)
 {
 	/* Hookup the initial arrays */
@@ -561,3 +570,11 @@ void __init memblock_init(void)
 	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
 }
 
+static int __init early_memblock(char *p)
+{
+	if (p && strstr(p, "debug"))
+		memblock_debug = 1;
+	return 0;
+}
+early_param("memblock", early_memblock);
+
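Unchanged by the move: booting with memblock=debug on the kernel command line
makes early_memblock() set memblock_debug, which is what lets
memblock_dump_all() print the "MEMBLOCK configuration" tables relocated in the
hunk above.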