author     David S. Miller <davem@davemloft.net>    2008-05-12 19:51:15 -0400
committer  David S. Miller <davem@davemloft.net>    2008-05-12 19:51:15 -0400
commit     4978db5bd964d90265f957f980ab2b0771ca2b9f (patch)
tree       d93e4b3a4c7e7418d8307506d40007e641482a2f
parent     94d149c34cda933ff5096aca94bb23bf68602f4e (diff)
lmb: Fix inconsistent alignment of size argument.
When allocating, if we will align up the size when making
the reservation, we should also align the size for the
check that the space is actually available.
The simplest thing is to just align the size up from
the beginning, then we can use plain 'size' throughout.
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   lib/lmb.c   12
1 file changed, 8 insertions(+), 4 deletions(-)
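For context: the fix hinges on rounding the requested size up to the allocation alignment once, at the top of the allocator, so that the free-space check (lmb_overlaps_region()) and the reservation (lmb_add_region()) operate on the same value. The snippet below is a minimal, self-contained userspace sketch of that idea, not part of the commit; the helper name align_up and the demo values are illustrative, and it assumes lmb_align_up() does the usual power-of-two round-up, (x + align - 1) & ~(align - 1).

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/* Round size up to the next multiple of align (align must be a power of two). */
static u64 align_up(u64 size, u64 align)
{
	return (size + (align - 1)) & ~(align - 1);
}

int main(void)
{
	u64 size = 9, align = 16;

	/*
	 * Before the fix, the overlap check used the raw size (9 here)
	 * while the reservation used the aligned size (16 here), so a
	 * candidate range could pass the check and still have its padded
	 * tail collide with an existing reservation.  Aligning once up
	 * front makes both steps see the same value.
	 */
	printf("raw size     = %llu\n", (unsigned long long)size);
	printf("aligned size = %llu\n", (unsigned long long)align_up(size, align));
	return 0;
}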
@@ -286,8 +286,7 @@ static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
 		j = lmb_overlaps_region(&lmb.reserved, base, size);
 		if (j < 0) {
 			/* this area isn't reserved, take it */
-			if (lmb_add_region(&lmb.reserved, base,
-					   lmb_align_up(size, align)) < 0)
+			if (lmb_add_region(&lmb.reserved, base, size) < 0)
 				base = ~(u64)0;
 			return base;
 		}
@@ -333,6 +332,10 @@ u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
 	struct lmb_region *mem = &lmb.memory;
 	int i;
 
+	BUG_ON(0 == size);
+
+	size = lmb_align_up(size, align);
+
 	for (i = 0; i < mem->cnt; i++) {
 		u64 ret = lmb_alloc_nid_region(&mem->region[i],
 					       nid_range,
@@ -370,6 +373,8 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 
 	BUG_ON(0 == size);
 
+	size = lmb_align_up(size, align);
+
 	/* On some platforms, make sure we allocate lowmem */
 	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
 	if (max_addr == LMB_ALLOC_ANYWHERE)
@@ -393,8 +398,7 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 		j = lmb_overlaps_region(&lmb.reserved, base, size);
 		if (j < 0) {
 			/* this area isn't reserved, take it */
-			if (lmb_add_region(&lmb.reserved, base,
-					   lmb_align_up(size, align)) < 0)
+			if (lmb_add_region(&lmb.reserved, base, size) < 0)
 				return 0;
 			return base;
 		}