 lib/lmb.c | 56 ++++++++++++++++++++++++++++++--------------------------
 1 file changed, 30 insertions(+), 26 deletions(-)
diff --git a/lib/lmb.c b/lib/lmb.c
index e4bbc617f468..896e2832099e 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -230,20 +230,23 @@ static u64 lmb_align_up(u64 addr, u64 size)
 static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
                                            u64 size, u64 align)
 {
-        u64 base;
+        u64 base, res_base;
         long j;
 
         base = lmb_align_down((end - size), align);
-        while (start <= base &&
-               ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
-                base = lmb_align_down(lmb.reserved.region[j].base - size,
-                                      align);
-
-        if (base != 0 && start <= base) {
-                if (lmb_add_region(&lmb.reserved, base,
-                                   lmb_align_up(size, align)) < 0)
-                        base = ~(u64)0;
-                return base;
+        while (start <= base) {
+                j = lmb_overlaps_region(&lmb.reserved, base, size);
+                if (j < 0) {
+                        /* this area isn't reserved, take it */
+                        if (lmb_add_region(&lmb.reserved, base,
+                                           lmb_align_up(size, align)) < 0)
+                                base = ~(u64)0;
+                        return base;
+                }
+                res_base = lmb.reserved.region[j].base;
+                if (res_base < size)
+                        break;
+                base = lmb_align_down(res_base - size, align);
         }
 
         return ~(u64)0;
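
The reworked loop in lmb_alloc_nid_unreserved() reserves the area as soon as lmb_overlaps_region() reports no overlap, and it gives up early when the overlapping reserved block starts below size: with u64 arithmetic, the old lmb.reserved.region[j].base - size could wrap around to a huge value in that case. A minimal userspace sketch of that wraparound and of the new guard (the helper name, types and values below are illustrative only, not taken from lib/lmb.c):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/* illustrative stand-in for lmb_align_down(): round down to a power-of-two align */
static u64 align_down(u64 addr, u64 align)
{
        return addr & ~(align - 1);
}

int main(void)
{
        u64 res_base = 0x1000;  /* reserved block starts low in memory */
        u64 size     = 0x4000;  /* request is larger than res_base */

        /* without the guard, res_base - size wraps to a huge address */
        printf("unguarded candidate: %#llx\n",
               (unsigned long long)align_down(res_base - size, 0x1000));

        /* the patched loop breaks out instead of computing a wrapped base */
        if (res_base < size)
                printf("guarded: give up instead of wrapping\n");
        return 0;
}
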
@@ -315,10 +318,12 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 {
         long i, j;
         u64 base = 0;
+        u64 res_base;
 
         BUG_ON(0 == size);
 
         /* On some platforms, make sure we allocate lowmem */
+        /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
         if (max_addr == LMB_ALLOC_ANYWHERE)
                 max_addr = LMB_REAL_LIMIT;
 
@@ -326,6 +331,8 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
                 u64 lmbbase = lmb.memory.region[i].base;
                 u64 lmbsize = lmb.memory.region[i].size;
 
+                if (lmbsize < size)
+                        continue;
                 if (max_addr == LMB_ALLOC_ANYWHERE)
                         base = lmb_align_down(lmbbase + lmbsize - size, align);
                 else if (lmbbase < max_addr) {
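
The added "if (lmbsize < size) continue;" skips memory regions that are too small for the request; otherwise lmbbase + lmbsize - size would place the first candidate below the region's own base. A small illustration with made-up regions and sizes (nothing below comes from lib/lmb.c):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

struct region { u64 base, size; };

int main(void)
{
        /* the first region is smaller than the request and must be skipped */
        struct region mem[] = { { 0x100000, 0x2000 }, { 0x200000, 0x40000 } };
        u64 size = 0x4000;

        for (int i = 0; i < 2; i++) {
                if (mem[i].size < size)
                        continue;       /* candidate would land below mem[i].base */
                printf("region %d: top-down candidate %#llx\n", i,
                       (unsigned long long)(mem[i].base + mem[i].size - size));
        }
        return 0;
}
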
@@ -334,25 +341,22 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
                 } else
                         continue;
 
-                while (lmbbase <= base) {
+                while (base && lmbbase <= base) {
                         j = lmb_overlaps_region(&lmb.reserved, base, size);
-                        if (j < 0)
+                        if (j < 0) {
+                                /* this area isn't reserved, take it */
+                                if (lmb_add_region(&lmb.reserved, base,
+                                                   size) < 0)
+                                        return 0;
+                                return base;
+                        }
+                        res_base = lmb.reserved.region[j].base;
+                        if (res_base < size)
                                 break;
-                        base = lmb_align_down(lmb.reserved.region[j].base - size,
-                                              align);
+                        base = lmb_align_down(res_base - size, align);
                 }
-
-                if ((base != 0) && (lmbbase <= base))
-                        break;
         }
-
-        if (i < 0)
-                return 0;
-
-        if (lmb_add_region(&lmb.reserved, base, lmb_align_up(size, align)) < 0)
-                return 0;
-
-        return base;
+        return 0;
 }
 
 /* You must call lmb_analyze() before this. */
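
Taken together, the __lmb_alloc_base() changes move the reservation into the scan loop (the first non-overlapping candidate is reserved and returned on the spot), stop the downward walk once the candidate reaches zero via the new "base &&" test, and drop the old post-loop "if (i < 0)" / lmb_add_region() tail. A compact userspace model of the reworked scan; the region contents, helper names and sizes are invented for the demo and are not part of the patch:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

struct region { u64 base, size; };

/* one invented reserved block inside the memory region used below */
static struct region reserved[] = { { 0x30000, 0x10000 } };

static u64 align_down(u64 addr, u64 align)
{
        return addr & ~(align - 1);
}

/* index of a reserved block overlapping [base, base + size), or -1 if none */
static int overlaps(u64 base, u64 size)
{
        for (int i = 0; i < (int)(sizeof(reserved) / sizeof(reserved[0])); i++) {
                if (base < reserved[i].base + reserved[i].size &&
                    reserved[i].base < base + size)
                        return i;
        }
        return -1;
}

int main(void)
{
        u64 lmbbase = 0x10000, lmbsize = 0x30000;       /* one memory region */
        u64 size = 0x8000, align = 0x1000;
        u64 base = align_down(lmbbase + lmbsize - size, align);

        /* walk the candidate downward past overlapping reserved blocks */
        while (base && lmbbase <= base) {
                int j = overlaps(base, size);
                if (j < 0) {
                        printf("allocated %#llx\n", (unsigned long long)base);
                        return 0;
                }
                u64 res_base = reserved[j].base;
                if (res_base < size)
                        break;          /* subtraction would wrap; give up */
                base = align_down(res_base - size, align);
        }
        printf("allocation failed\n");
        return 0;
}
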