author		Yinghai Lu <yinghai@kernel.org>	2010-10-04 17:58:04 -0400
committer	H. Peter Anvin <hpa@zytor.com>		2010-10-06 00:45:43 -0400
commit		16c36f743bf8481d0ba40a6de0af11736095d7cf (patch)
tree		d9b319ab02e97c122aae512143ffc23b3bdb5375 /arch/x86/mm/memblock.c
parent		f1af98c7629a1b76fd7336decbc776acdeed2120 (diff)
x86, memblock: Remove __memblock_x86_find_in_range_size()
Fold it into memblock_x86_find_in_range_size(), and rename bad_addr_size()
to check_with_memblock_reserved_size().

This makes the whole memblock_x86_find_in_range_size() path more readable.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <4CAA4DEC.4000401@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
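
[Editor's note: for readability, here is a sketch of memblock_x86_find_in_range_size() as it looks with the helper folded in, pieced together from the hunks below. The for_each_memblock(memory, r) loop header, the ei_start initialization, and the final MEMBLOCK_ERROR return are assumptions taken from the surrounding file rather than from the diff context itself.]

u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
{
	struct memblock_region *r;

	for_each_memblock(memory, r) {		/* assumed: not in hunk context */
		u64 ei_start = r->base;		/* assumed: not in hunk context */
		u64 ei_last = ei_start + r->size;
		u64 addr;

		/* lowest aligned candidate in this region at or above start */
		addr = round_up(ei_start, align);
		if (addr < start)
			addr = round_up(start, align);
		if (addr >= ei_last)
			continue;

		/* shrink [addr, addr + *sizep) until it clears all reserved areas */
		*sizep = ei_last - addr;
		while (check_with_memblock_reserved_size(&addr, sizep, align))
			;

		/* *sizep == 0 now signals "entirely reserved, nothing usable here" */
		if (*sizep)
			return addr;
	}

	return MEMBLOCK_ERROR;			/* assumed: not in hunk context */
}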
Diffstat (limited to 'arch/x86/mm/memblock.c')
 arch/x86/mm/memblock.c | 39 +++++++++++----------------------------
 1 file changed, 11 insertions(+), 28 deletions(-)
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index fd7a0404945d..aa1169392b83 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -8,7 +8,7 @@
 #include <linux/range.h>
 
 /* Check for already reserved areas */
-static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
+static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align)
 {
 	struct memblock_region *r;
 	u64 addr = *addrp, last;
@@ -30,7 +30,7 @@ again:
 			goto again;
 		}
 		if (last <= (r->base + r->size) && addr >= r->base) {
-			(*sizep)++;
+			*sizep = 0;
 			return false;
 		}
 	}
@@ -41,29 +41,6 @@ again:
 	return changed;
 }
 
-static u64 __init __memblock_x86_find_in_range_size(u64 ei_start, u64 ei_last, u64 start,
-			 u64 *sizep, u64 align)
-{
-	u64 addr, last;
-
-	addr = round_up(ei_start, align);
-	if (addr < start)
-		addr = round_up(start, align);
-	if (addr >= ei_last)
-		goto out;
-	*sizep = ei_last - addr;
-	while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last)
-		;
-	last = addr + *sizep;
-	if (last > ei_last)
-		goto out;
-
-	return addr;
-
-out:
-	return MEMBLOCK_ERROR;
-}
-
 /*
  * Find next free range after start, and size is returned in *sizep
  */
@@ -76,10 +53,16 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
 		u64 ei_last = ei_start + r->size;
 		u64 addr;
 
-		addr = __memblock_x86_find_in_range_size(ei_start, ei_last, start,
-					 sizep, align);
+		addr = round_up(ei_start, align);
+		if (addr < start)
+			addr = round_up(start, align);
+		if (addr >= ei_last)
+			continue;
+		*sizep = ei_last - addr;
+		while (check_with_memblock_reserved_size(&addr, sizep, align))
+			;
 
-		if (addr != MEMBLOCK_ERROR)
+		if (*sizep)
 			return addr;
 	}
 
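[Editor's note: one behavioural detail worth spelling out. The old bad_addr_size() signalled "candidate range entirely inside a reserved region" by bumping *sizep with (*sizep)++, apparently so the caller's subsequent last > ei_last check would reject the range; the renamed check_with_memblock_reserved_size() simply zeroes *sizep, letting the caller test *sizep directly instead of comparing against MEMBLOCK_ERROR from the removed helper. Below is a minimal user-space model of that clamp-and-retest contract, with hypothetical names and a single hard-coded reserved range; the real helper also skips the candidate upward past a partially overlapping reservation, which this sketch omits.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for one reserved memblock region */
static const uint64_t rsv_base = 0x2000, rsv_size = 0x1000;

/*
 * Simplified model of check_with_memblock_reserved_size(): clamp
 * [*addrp, *addrp + *sizep) against the reserved range. Returns true
 * when it changed the range (caller loops and re-checks), false when
 * done; *sizep == 0 on return means the range was swallowed entirely.
 */
static bool clamp_against_reserved(uint64_t *addrp, uint64_t *sizep)
{
	uint64_t addr = *addrp, last = addr + *sizep;

	if (addr >= rsv_base && last <= rsv_base + rsv_size) {
		*sizep = 0;			/* fully reserved: fail */
		return false;
	}
	if (addr < rsv_base && last > rsv_base) {
		*sizep = rsv_base - addr;	/* truncate below the reservation */
		return true;
	}
	return false;
}

int main(void)
{
	uint64_t addr = 0x1000, size = 0x4000;

	while (clamp_against_reserved(&addr, &size))
		;
	if (size)
		printf("free range at %#llx, size %#llx\n",
		       (unsigned long long)addr, (unsigned long long)size);
	else
		printf("no usable range\n");
	return 0;
}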