diff options
Diffstat (limited to 'arch/x86/mm/memblock.c')
| -rw-r--r-- | arch/x86/mm/memblock.c | 87 |
1 file changed, 87 insertions, 0 deletions
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c new file mode 100644 index 000000000000..26ba46234cba --- /dev/null +++ b/arch/x86/mm/memblock.c | |||
| @@ -0,0 +1,87 @@ | |||
| 1 | #include <linux/kernel.h> | ||
| 2 | #include <linux/types.h> | ||
| 3 | #include <linux/init.h> | ||
| 4 | #include <linux/bitops.h> | ||
| 5 | #include <linux/memblock.h> | ||
| 6 | #include <linux/bootmem.h> | ||
| 7 | #include <linux/mm.h> | ||
| 8 | #include <linux/range.h> | ||
| 9 | |||
/*
 * Check for already reserved areas.
 *
 * Adjusts the candidate range [*addrp, *addrp + *sizep) so that it does
 * not intersect any memblock.reserved region:
 *  - if the range overlaps the start of a reserved region, its end is
 *    clipped down to that region's base;
 *  - if the range overlaps the end of a reserved region, its start is
 *    advanced past that region (re-rounded up to @align) while keeping
 *    the original end.
 * Every adjustment restarts the scan ("goto again"), since the modified
 * range may now intersect a different reserved region.
 *
 * Returns true when the range was changed (the caller loops until the
 * range settles) and writes the adjusted values back through the
 * pointers; returns false when the range is either already clear of all
 * reserved regions or fully enclosed by one.  In the fully-enclosed case
 * *sizep is bumped by one so the caller's "addr + *sizep <= ei_last" /
 * "last > ei_last" bounds checks reject the range.
 * NOTE(review): that trick only works if the range still ends at the
 * caller's ei_last when full enclosure is detected — presumably reserved
 * regions are kept merged/disjoint so earlier clipping cannot leave an
 * enclosed range ending below ei_last; confirm against memblock merging.
 */
static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
{
	struct memblock_region *r;
	u64 addr = *addrp, last;
	u64 size = *sizep;
	bool changed = false;

again:
	last = addr + size;	/* exclusive end of the candidate range */
	for_each_memblock(reserved, r) {
		/* Range spills over the start of a reserved region:
		 * clip the end down to the region base. */
		if (last > r->base && addr < r->base) {
			size = r->base - addr;
			changed = true;
			goto again;
		}
		/* Range spills over the end of a reserved region:
		 * move the start past the region, keep the same end. */
		if (last > (r->base + r->size) && addr < (r->base + r->size)) {
			addr = round_up(r->base + r->size, align);
			size = last - addr;
			changed = true;
			goto again;
		}
		/* Range fully inside a reserved region: poison *sizep so
		 * the caller's bounds check fails (see header comment). */
		if (last <= (r->base + r->size) && addr >= r->base) {
			(*sizep)++;
			return false;
		}
	}
	/* Publish the adjusted range only if something actually changed. */
	if (changed) {
		*addrp = addr;
		*sizep = size;
	}
	return changed;
}
| 43 | |||
| 44 | static u64 __init __memblock_x86_find_in_range_size(u64 ei_start, u64 ei_last, u64 start, | ||
| 45 | u64 *sizep, u64 align) | ||
| 46 | { | ||
| 47 | u64 addr, last; | ||
| 48 | |||
| 49 | addr = round_up(ei_start, align); | ||
| 50 | if (addr < start) | ||
| 51 | addr = round_up(start, align); | ||
| 52 | if (addr >= ei_last) | ||
| 53 | goto out; | ||
| 54 | *sizep = ei_last - addr; | ||
| 55 | while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last) | ||
| 56 | ; | ||
| 57 | last = addr + *sizep; | ||
| 58 | if (last > ei_last) | ||
| 59 | goto out; | ||
| 60 | |||
| 61 | return addr; | ||
| 62 | |||
| 63 | out: | ||
| 64 | return MEMBLOCK_ERROR; | ||
| 65 | } | ||
| 66 | |||
| 67 | /* | ||
| 68 | * Find next free range after start, and size is returned in *sizep | ||
| 69 | */ | ||
| 70 | u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align) | ||
| 71 | { | ||
| 72 | struct memblock_region *r; | ||
| 73 | |||
| 74 | for_each_memblock(memory, r) { | ||
| 75 | u64 ei_start = r->base; | ||
| 76 | u64 ei_last = ei_start + r->size; | ||
| 77 | u64 addr; | ||
| 78 | |||
| 79 | addr = __memblock_x86_find_in_range_size(ei_start, ei_last, start, | ||
| 80 | sizep, align); | ||
| 81 | |||
| 82 | if (addr != MEMBLOCK_ERROR) | ||
| 83 | return addr; | ||
| 84 | } | ||
| 85 | |||
| 86 | return MEMBLOCK_ERROR; | ||
| 87 | } | ||
