author     Yinghai Lu <yinghai@kernel.org>        2010-08-25 16:39:17 -0400
committer  H. Peter Anvin <hpa@zytor.com>         2010-08-27 14:12:29 -0400
commit     72d7c3b33c980843e756681fb4867dc1efd62a76 (patch)
tree       9607345d9fa055dd501aacf0772258fb72897035 /mm
parent     301ff3e88ef9ff4bdb92f36a3e6170fce4c9dd34 (diff)
x86: Use memblock to replace early_res
1. Replace find_e820_area() with memblock_find_in_range().
2. Replace reserve_early() with memblock_x86_reserve_range().
3. Replace free_early() with memblock_x86_free_range()
   (the call mapping is sketched after this list).
4. The NO_BOOTMEM path switches to memblock as well.
5. Keep the _e820 and _early wrappers in this patch; a following patch
   will replace them all.
6. Because memblock_x86_free_range() supports partial free, some special
   handling can be removed.
7. memblock_find_in_range() must only be called after memblock_x86_fill(),
   so move some callers later in setup.c::setup_arch()
   -- corruption_check and mptable_update.
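For orientation, a minimal sketch of the call mapping listed above. The
(start, end) and "name" argument conventions are taken from the hunks below;
the wrapper function, variable names, and header locations are illustrative
assumptions, not part of this patch.

/*
 * Illustrative only: how an early-reservation site changes under this
 * series. Assumes the old declarations from <asm/e820.h> and the new
 * ones from <linux/memblock.h> / <asm/memblock.h>; the surrounding
 * function is made up for the example, and the reserve-then-free
 * sequence exists purely to show each mapped call once.
 */
static void __init example_early_reserve(u64 start, u64 end, u64 size, u64 align)
{
	u64 addr;

	/* old early_res API */
	addr = find_e820_area(start, end, size, align);
	reserve_early(addr, addr + size, "EXAMPLE");
	free_early(addr, addr + size);

	/* memblock replacements introduced by this series */
	addr = memblock_find_in_range(start, end, size, align);
	memblock_x86_reserve_range(addr, addr + size, "EXAMPLE");
	memblock_x86_free_range(addr, addr + size);
}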
-v2: Move reserve_brk() earlier, before fill_memblock_area(), to avoid an
     overlap between brk and memblock_find_in_range(). That overlap can
     happen when the E820 table has more than 128 RAM entries and
     memblock_x86_fill() has to call memblock_find_in_range() to find a new
     place for the memblock.memory.region array. We do not need extend_brk()
     after fill_memblock_area(), so reserving brk early is safe.
     (See the ordering sketch after this note.)
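A minimal ordering sketch of what -v2 describes, assuming fill_memblock_area()
is the earlier name of the memblock_x86_fill() step mentioned in item 7; the
real setup_arch() has many more steps in between, and the wrapper function
below is made up for the example.

/*
 * Boot-ordering sketch only (not real setup_arch() code): reserve_brk()
 * must run before memblock is filled, because filling memblock may itself
 * allocate with memblock_find_in_range() and could otherwise land on brk.
 */
static void __init setup_arch_ordering_sketch(void)
{
	reserve_brk();		/* all extend_brk() users have run by now */
	memblock_x86_fill();	/* may resize memblock.memory.region via
				 * memblock_find_in_range() when E820 has
				 * more than 128 RAM entries */
	/* from here on, memblock_find_in_range() is safe to use */
}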
-v3: Move find_smp_config() earlier, so that memblock_find_in_range() cannot
     pick a wrong place when the BIOS does not put the mptable in the right
     location.
-v4: Treat RESERVED_KERN as RAM in memblock.memory; those ranges are already
     in memblock.reserved. Use __NOT_KEEP_MEMBLOCK so that memblock-related
     code can be freed later.
-v5: The generic __memblock_find_in_range() searches from high to low, and on
     32bit the active_region does include high pages, so replace the limit
     with memblock.default_alloc_limit, aka get_max_mapped().
-v6: Use current_limit instead.
-v7: Check against MEMBLOCK_ERROR instead of -1ULL or -1L (see the sketch
     after this changelog).
-v8: Set memblock_can_resize early to handle EFI with more RAM entries.
-v9: Update after the kmemleak changes in mainline.
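A minimal sketch of the allocation pattern after -v6/-v7, mirroring the new
__alloc_memory_core_early() body in the mm/page_alloc.c hunk below;
find_memory_core_early() and MEMBLOCK_ERROR are taken from that hunk, and
nothing else is added.

	/* Excerpted pattern; see the page_alloc.c hunk for full context. */
	u64 addr;

	if (limit > memblock.current_limit)	/* -v6: clamp against current_limit */
		limit = memblock.current_limit;

	addr = find_memory_core_early(nid, size, align, goal, limit);
	if (addr == MEMBLOCK_ERROR)		/* -v7: not -1ULL / -1L any more */
		return NULL;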
Suggested-by: David S. Miller <davem@davemloft.net>
Suggested-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'mm')
-rw-r--r--  mm/bootmem.c        |  3
-rw-r--r--  mm/page_alloc.c     | 50
-rw-r--r--  mm/sparse-vmemmap.c | 11
3 files changed, 18 insertions, 46 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index bde170dd2fde..fda01a2c31af 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/kmemleak.h>
 #include <linux/range.h>
+#include <linux/memblock.h>
 
 #include <asm/bug.h>
 #include <asm/io.h>
@@ -434,6 +435,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 			      unsigned long size)
 {
 #ifdef CONFIG_NO_BOOTMEM
+	kmemleak_free_part(__va(physaddr), size);
 	free_early(physaddr, physaddr + size);
 #else
 	unsigned long start, end;
@@ -459,6 +461,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 void __init free_bootmem(unsigned long addr, unsigned long size)
 {
 #ifdef CONFIG_NO_BOOTMEM
+	kmemleak_free_part(__va(addr), size);
 	free_early(addr, addr + size);
 #else
 	unsigned long start, end;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8c9b34674d83..f2cd7450fa76 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3667,46 +3667,26 @@ int __init add_from_early_node_map(struct range *range, int az,
 void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
 					u64 goal, u64 limit)
 {
-	int i;
 	void *ptr;
+	u64 addr;
 
-	if (limit > get_max_mapped())
-		limit = get_max_mapped();
+	if (limit > memblock.current_limit)
+		limit = memblock.current_limit;
 
-	/* need to go over early_node_map to find out good range for node */
-	for_each_active_range_index_in_nid(i, nid) {
-		u64 addr;
-		u64 ei_start, ei_last;
+	addr = find_memory_core_early(nid, size, align, goal, limit);
 
-		ei_last = early_node_map[i].end_pfn;
-		ei_last <<= PAGE_SHIFT;
-		ei_start = early_node_map[i].start_pfn;
-		ei_start <<= PAGE_SHIFT;
-		addr = find_early_area(ei_start, ei_last,
-					goal, limit, size, align);
-
-		if (addr == -1ULL)
-			continue;
-
-#if 0
-		printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n",
-				nid,
-				ei_start, ei_last, goal, limit, size,
-				align, addr);
-#endif
-
-		ptr = phys_to_virt(addr);
-		memset(ptr, 0, size);
-		reserve_early_without_check(addr, addr + size, "BOOTMEM");
-		/*
-		 * The min_count is set to 0 so that bootmem allocated blocks
-		 * are never reported as leaks.
-		 */
-		kmemleak_alloc(ptr, size, 0, 0);
-		return ptr;
-	}
+	if (addr == MEMBLOCK_ERROR)
+		return NULL;
 
-	return NULL;
+	ptr = phys_to_virt(addr);
+	memset(ptr, 0, size);
+	memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
+	/*
+	 * The min_count is set to 0 so that bootmem allocated blocks
+	 * are never reported as leaks.
+	 */
+	kmemleak_alloc(ptr, size, 0, 0);
+	return ptr;
 }
 #endif
 
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index aa33fd67fa41..29d6cbffb283 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -220,18 +220,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 
 	if (vmemmap_buf_start) {
 		/* need to free left buf */
-#ifdef CONFIG_NO_BOOTMEM
-		free_early(__pa(vmemmap_buf_start), __pa(vmemmap_buf_end));
-		if (vmemmap_buf_start < vmemmap_buf) {
-			char name[15];
-
-			snprintf(name, sizeof(name), "MEMMAP %d", nodeid);
-			reserve_early_without_check(__pa(vmemmap_buf_start),
-						__pa(vmemmap_buf), name);
-		}
-#else
 		free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
-#endif
 		vmemmap_buf = NULL;
 		vmemmap_buf_end = NULL;
 	}