author     Paul Mackerras <paulus@samba.org>  2008-04-12 01:20:59 -0400
committer  Paul Mackerras <paulus@samba.org>  2008-04-15 07:22:17 -0400
commit     d9024df02ffe74d723d97d552f86de3b34beb8cc (patch)
tree       6e753db66f4404526d6c22a41a3a050dc156db72 /lib
parent     300613e523d53f346f8ff0256921e289da39ed7b (diff)
[LMB] Restructure allocation loops to avoid unsigned underflow
There is a potential bug in __lmb_alloc_base where we subtract `size' from the base address of a reserved region without checking whether the subtraction could wrap around and produce a very large unsigned value. In fact it probably isn't possible to hit the bug in practice since it would only occur in the situation where we can't satisfy the allocation request and there is a reserved region starting at 0.

This fixes the potential bug by breaking out of the loop when we get to the point where the base of the reserved region is less than the size requested. This also restructures the loop to be a bit easier to follow.

The same logic got copied into lmb_alloc_nid_unreserved, so this makes a similar change there. Here the bug is more likely to be hit because the outer loop (in lmb_alloc_nid) goes through the memory regions in increasing order rather than decreasing order as __lmb_alloc_base does, and we are therefore more likely to hit the case where we are testing against a reserved region with a base address of 0.

Signed-off-by: Paul Mackerras <paulus@samba.org>
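For illustration only (not part of the patch), a minimal user-space C sketch of the wraparound described above, assuming lmb_align_down simply rounds an address down to a power-of-two alignment:

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t u64;

	/* assumption: lmb_align_down rounds down to a power-of-two alignment */
	static u64 lmb_align_down(u64 addr, u64 align)
	{
		return addr & ~(align - 1);
	}

	int main(void)
	{
		u64 res_base = 0;	/* reserved region starting at address 0 */
		u64 size = 0x4000;	/* request is larger than res_base */

		/* old loop body: subtract unconditionally -> u64 wraps */
		u64 wrapped = lmb_align_down(res_base - size, 0x1000);
		printf("wrapped candidate base: 0x%llx\n",
		       (unsigned long long)wrapped);

		/* new loop body: bail out before the subtraction can wrap */
		if (res_base < size)
			printf("res_base < size, break out of the loop\n");
		return 0;
	}

In the old loops that huge wrapped value would then be compared against start (or lmbbase), which is exactly what the res_base < size check in the restructured loops guards against.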
Diffstat (limited to 'lib')
-rw-r--r--  lib/lmb.c | 56
1 file changed, 30 insertions, 26 deletions
diff --git a/lib/lmb.c b/lib/lmb.c
index e4bbc617f468..896e2832099e 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -230,20 +230,23 @@ static u64 lmb_align_up(u64 addr, u64 size)
 static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
 					   u64 size, u64 align)
 {
-	u64 base;
+	u64 base, res_base;
 	long j;
 
 	base = lmb_align_down((end - size), align);
-	while (start <= base &&
-	       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
-		base = lmb_align_down(lmb.reserved.region[j].base - size,
-				      align);
-
-	if (base != 0 && start <= base) {
-		if (lmb_add_region(&lmb.reserved, base,
-				   lmb_align_up(size, align)) < 0)
-			base = ~(u64)0;
-		return base;
+	while (start <= base) {
+		j = lmb_overlaps_region(&lmb.reserved, base, size);
+		if (j < 0) {
+			/* this area isn't reserved, take it */
+			if (lmb_add_region(&lmb.reserved, base,
+					   lmb_align_up(size, align)) < 0)
+				base = ~(u64)0;
+			return base;
+		}
+		res_base = lmb.reserved.region[j].base;
+		if (res_base < size)
+			break;
+		base = lmb_align_down(res_base - size, align);
 	}
 
 	return ~(u64)0;
@@ -315,10 +318,12 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 {
 	long i, j;
 	u64 base = 0;
+	u64 res_base;
 
 	BUG_ON(0 == size);
 
 	/* On some platforms, make sure we allocate lowmem */
+	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
 	if (max_addr == LMB_ALLOC_ANYWHERE)
 		max_addr = LMB_REAL_LIMIT;
 
@@ -326,6 +331,8 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 		u64 lmbbase = lmb.memory.region[i].base;
 		u64 lmbsize = lmb.memory.region[i].size;
 
+		if (lmbsize < size)
+			continue;
 		if (max_addr == LMB_ALLOC_ANYWHERE)
 			base = lmb_align_down(lmbbase + lmbsize - size, align);
 		else if (lmbbase < max_addr) {
@@ -334,25 +341,22 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 		} else
 			continue;
 
-		while (lmbbase <= base) {
+		while (base && lmbbase <= base) {
 			j = lmb_overlaps_region(&lmb.reserved, base, size);
-			if (j < 0)
+			if (j < 0) {
+				/* this area isn't reserved, take it */
+				if (lmb_add_region(&lmb.reserved, base,
+						   size) < 0)
+					return 0;
+				return base;
+			}
+			res_base = lmb.reserved.region[j].base;
+			if (res_base < size)
 				break;
-			base = lmb_align_down(lmb.reserved.region[j].base - size,
-					      align);
+			base = lmb_align_down(res_base - size, align);
 		}
-
-		if ((base != 0) && (lmbbase <= base))
-			break;
 	}
-
-	if (i < 0)
-		return 0;
-
-	if (lmb_add_region(&lmb.reserved, base, lmb_align_up(size, align)) < 0)
-		return 0;
-
-	return base;
+	return 0;
 }
 
 /* You must call lmb_analyze() before this. */