diff options
Diffstat (limited to 'lib/lmb.c')
| -rw-r--r-- | lib/lmb.c | 45 | 
1 file changed, 30 insertions, 15 deletions
| @@ -19,31 +19,42 @@ | |||
| 19 | 19 | ||
| 20 | struct lmb lmb; | 20 | struct lmb lmb; | 
| 21 | 21 | ||
| 22 | static int lmb_debug; | ||
| 23 | |||
| 24 | static int __init early_lmb(char *p) | ||
| 25 | { | ||
| 26 | if (p && strstr(p, "debug")) | ||
| 27 | lmb_debug = 1; | ||
| 28 | return 0; | ||
| 29 | } | ||
| 30 | early_param("lmb", early_lmb); | ||
| 31 | |||
| 22 | void lmb_dump_all(void) | 32 | void lmb_dump_all(void) | 
| 23 | { | 33 | { | 
| 24 | #ifdef DEBUG | ||
| 25 | unsigned long i; | 34 | unsigned long i; | 
| 26 | 35 | ||
| 27 | pr_debug("lmb_dump_all:\n"); | 36 | if (!lmb_debug) | 
| 28 | pr_debug(" memory.cnt = 0x%lx\n", lmb.memory.cnt); | 37 | return; | 
| 29 | pr_debug(" memory.size = 0x%llx\n", | 38 | |
| 39 | pr_info("lmb_dump_all:\n"); | ||
| 40 | pr_info(" memory.cnt = 0x%lx\n", lmb.memory.cnt); | ||
| 41 | pr_info(" memory.size = 0x%llx\n", | ||
| 30 | (unsigned long long)lmb.memory.size); | 42 | (unsigned long long)lmb.memory.size); | 
| 31 | for (i=0; i < lmb.memory.cnt ;i++) { | 43 | for (i=0; i < lmb.memory.cnt ;i++) { | 
| 32 | pr_debug(" memory.region[0x%x].base = 0x%llx\n", | 44 | pr_info(" memory.region[0x%lx].base = 0x%llx\n", | 
| 33 | i, (unsigned long long)lmb.memory.region[i].base); | 45 | i, (unsigned long long)lmb.memory.region[i].base); | 
| 34 | pr_debug(" .size = 0x%llx\n", | 46 | pr_info(" .size = 0x%llx\n", | 
| 35 | (unsigned long long)lmb.memory.region[i].size); | 47 | (unsigned long long)lmb.memory.region[i].size); | 
| 36 | } | 48 | } | 
| 37 | 49 | ||
| 38 | pr_debug(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt); | 50 | pr_info(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt); | 
| 39 | pr_debug(" reserved.size = 0x%lx\n", lmb.reserved.size); | 51 | pr_info(" reserved.size = 0x%lx\n", lmb.reserved.size); | 
| 40 | for (i=0; i < lmb.reserved.cnt ;i++) { | 52 | for (i=0; i < lmb.reserved.cnt ;i++) { | 
| 41 | pr_debug(" reserved.region[0x%x].base = 0x%llx\n", | 53 | pr_info(" reserved.region[0x%lx].base = 0x%llx\n", | 
| 42 | i, (unsigned long long)lmb.reserved.region[i].base); | 54 | i, (unsigned long long)lmb.reserved.region[i].base); | 
| 43 | pr_debug(" .size = 0x%llx\n", | 55 | pr_info(" .size = 0x%llx\n", | 
| 44 | (unsigned long long)lmb.reserved.region[i].size); | 56 | (unsigned long long)lmb.reserved.region[i].size); | 
| 45 | } | 57 | } | 
| 46 | #endif /* DEBUG */ | ||
| 47 | } | 58 | } | 
| 48 | 59 | ||
| 49 | static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2, | 60 | static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2, | 
| @@ -286,8 +297,7 @@ static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end, | |||
| 286 | j = lmb_overlaps_region(&lmb.reserved, base, size); | 297 | j = lmb_overlaps_region(&lmb.reserved, base, size); | 
| 287 | if (j < 0) { | 298 | if (j < 0) { | 
| 288 | /* this area isn't reserved, take it */ | 299 | /* this area isn't reserved, take it */ | 
| 289 | if (lmb_add_region(&lmb.reserved, base, | 300 | if (lmb_add_region(&lmb.reserved, base, size) < 0) | 
| 290 | lmb_align_up(size, align)) < 0) | ||
| 291 | base = ~(u64)0; | 301 | base = ~(u64)0; | 
| 292 | return base; | 302 | return base; | 
| 293 | } | 303 | } | 
| @@ -333,6 +343,10 @@ u64 __init lmb_alloc_nid(u64 size, u64 align, int nid, | |||
| 333 | struct lmb_region *mem = &lmb.memory; | 343 | struct lmb_region *mem = &lmb.memory; | 
| 334 | int i; | 344 | int i; | 
| 335 | 345 | ||
| 346 | BUG_ON(0 == size); | ||
| 347 | |||
| 348 | size = lmb_align_up(size, align); | ||
| 349 | |||
| 336 | for (i = 0; i < mem->cnt; i++) { | 350 | for (i = 0; i < mem->cnt; i++) { | 
| 337 | u64 ret = lmb_alloc_nid_region(&mem->region[i], | 351 | u64 ret = lmb_alloc_nid_region(&mem->region[i], | 
| 338 | nid_range, | 352 | nid_range, | 
| @@ -370,6 +384,8 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr) | |||
| 370 | 384 | ||
| 371 | BUG_ON(0 == size); | 385 | BUG_ON(0 == size); | 
| 372 | 386 | ||
| 387 | size = lmb_align_up(size, align); | ||
| 388 | |||
| 373 | /* On some platforms, make sure we allocate lowmem */ | 389 | /* On some platforms, make sure we allocate lowmem */ | 
| 374 | /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */ | 390 | /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */ | 
| 375 | if (max_addr == LMB_ALLOC_ANYWHERE) | 391 | if (max_addr == LMB_ALLOC_ANYWHERE) | 
| @@ -393,8 +409,7 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr) | |||
| 393 | j = lmb_overlaps_region(&lmb.reserved, base, size); | 409 | j = lmb_overlaps_region(&lmb.reserved, base, size); | 
| 394 | if (j < 0) { | 410 | if (j < 0) { | 
| 395 | /* this area isn't reserved, take it */ | 411 | /* this area isn't reserved, take it */ | 
| 396 | if (lmb_add_region(&lmb.reserved, base, | 412 | if (lmb_add_region(&lmb.reserved, base, size) < 0) | 
| 397 | lmb_align_up(size, align)) < 0) | ||
| 398 | return 0; | 413 | return 0; | 
| 399 | return base; | 414 | return base; | 
| 400 | } | 415 | } | 
