diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-11 22:12:10 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-11 22:12:10 -0500 |
commit | d0b9706c20ebb4ba181dc26e52ac9a6861abf425 (patch) | |
tree | 436e89246fd5ebcf737cae27e135a1995155329b /arch/x86/mm | |
parent | 02d929502ce7b57f4835d8bb7c828d36e6d9e8ce (diff) | |
parent | 54eed6cb16ec315565aaaf8e34252ca253a68b7b (diff) |
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/numa: Add constraints check for nid parameters
mm, x86: Remove debug_pagealloc_enabled
x86/mm: Initialize high mem before free_all_bootmem()
arch/x86/kernel/e820.c: quiet sparse noise about plain integer as NULL pointer
arch/x86/kernel/e820.c: Eliminate bubble sort from sanitize_e820_map()
x86: Fix mmap random address range
x86, mm: Unify zone_sizes_init()
x86, mm: Prepare zone_sizes_init() for unification
x86, mm: Use max_low_pfn for ZONE_NORMAL on 64-bit
x86, mm: Wrap ZONE_DMA32 with CONFIG_ZONE_DMA32
x86, mm: Use max_pfn instead of highend_pfn
x86, mm: Move zone init from paging_init() on 64-bit
x86, mm: Use MAX_DMA_PFN for ZONE_DMA on 32-bit
Diffstat (limited to 'arch/x86/mm')
-rw-r--r-- | arch/x86/mm/init.c | 23 | ||||
-rw-r--r-- | arch/x86/mm/init_32.c | 29 | ||||
-rw-r--r-- | arch/x86/mm/init_64.c | 11 | ||||
-rw-r--r-- | arch/x86/mm/mmap.c | 4 | ||||
-rw-r--r-- | arch/x86/mm/numa.c | 10 | ||||
-rw-r--r-- | arch/x86/mm/pageattr.c | 6 |
6 files changed, 43 insertions, 40 deletions
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index a298914058f9..6cabf6570d64 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/ioport.h> | 3 | #include <linux/ioport.h> |
4 | #include <linux/swap.h> | 4 | #include <linux/swap.h> |
5 | #include <linux/memblock.h> | 5 | #include <linux/memblock.h> |
6 | #include <linux/bootmem.h> /* for max_low_pfn */ | ||
6 | 7 | ||
7 | #include <asm/cacheflush.h> | 8 | #include <asm/cacheflush.h> |
8 | #include <asm/e820.h> | 9 | #include <asm/e820.h> |
@@ -15,6 +16,7 @@ | |||
15 | #include <asm/tlbflush.h> | 16 | #include <asm/tlbflush.h> |
16 | #include <asm/tlb.h> | 17 | #include <asm/tlb.h> |
17 | #include <asm/proto.h> | 18 | #include <asm/proto.h> |
19 | #include <asm/dma.h> /* for MAX_DMA_PFN */ | ||
18 | 20 | ||
19 | unsigned long __initdata pgt_buf_start; | 21 | unsigned long __initdata pgt_buf_start; |
20 | unsigned long __meminitdata pgt_buf_end; | 22 | unsigned long __meminitdata pgt_buf_end; |
@@ -392,3 +394,24 @@ void free_initrd_mem(unsigned long start, unsigned long end) | |||
392 | free_init_pages("initrd memory", start, PAGE_ALIGN(end)); | 394 | free_init_pages("initrd memory", start, PAGE_ALIGN(end)); |
393 | } | 395 | } |
394 | #endif | 396 | #endif |
397 | |||
398 | void __init zone_sizes_init(void) | ||
399 | { | ||
400 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | ||
401 | |||
402 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | ||
403 | |||
404 | #ifdef CONFIG_ZONE_DMA | ||
405 | max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; | ||
406 | #endif | ||
407 | #ifdef CONFIG_ZONE_DMA32 | ||
408 | max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; | ||
409 | #endif | ||
410 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; | ||
411 | #ifdef CONFIG_HIGHMEM | ||
412 | max_zone_pfns[ZONE_HIGHMEM] = max_pfn; | ||
413 | #endif | ||
414 | |||
415 | free_area_init_nodes(max_zone_pfns); | ||
416 | } | ||
417 | |||
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 0c1da394a634..8663f6c47ccb 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -668,22 +668,6 @@ void __init initmem_init(void) | |||
668 | } | 668 | } |
669 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ | 669 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ |
670 | 670 | ||
671 | static void __init zone_sizes_init(void) | ||
672 | { | ||
673 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | ||
674 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | ||
675 | #ifdef CONFIG_ZONE_DMA | ||
676 | max_zone_pfns[ZONE_DMA] = | ||
677 | virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; | ||
678 | #endif | ||
679 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; | ||
680 | #ifdef CONFIG_HIGHMEM | ||
681 | max_zone_pfns[ZONE_HIGHMEM] = highend_pfn; | ||
682 | #endif | ||
683 | |||
684 | free_area_init_nodes(max_zone_pfns); | ||
685 | } | ||
686 | |||
687 | void __init setup_bootmem_allocator(void) | 671 | void __init setup_bootmem_allocator(void) |
688 | { | 672 | { |
689 | printk(KERN_INFO " mapped low ram: 0 - %08lx\n", | 673 | printk(KERN_INFO " mapped low ram: 0 - %08lx\n", |
@@ -754,6 +738,17 @@ void __init mem_init(void) | |||
754 | #ifdef CONFIG_FLATMEM | 738 | #ifdef CONFIG_FLATMEM |
755 | BUG_ON(!mem_map); | 739 | BUG_ON(!mem_map); |
756 | #endif | 740 | #endif |
741 | /* | ||
742 | * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to | ||
743 | * be done before free_all_bootmem(). Memblock uses free low memory for | ||
744 | * temporary data (see find_range_array()) and for this purpose can use | ||
745 | * pages that were already passed to the buddy allocator, hence marked as | ||
746 | * not accessible in the page tables when compiled with | ||
747 | * CONFIG_DEBUG_PAGEALLOC. Otherwise order of initialization is not | ||
748 | * important here. | ||
749 | */ | ||
750 | set_highmem_pages_init(); | ||
751 | |||
757 | /* this will put all low memory onto the freelists */ | 752 | /* this will put all low memory onto the freelists */ |
758 | totalram_pages += free_all_bootmem(); | 753 | totalram_pages += free_all_bootmem(); |
759 | 754 | ||
@@ -765,8 +760,6 @@ void __init mem_init(void) | |||
765 | if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp))) | 760 | if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp))) |
766 | reservedpages++; | 761 | reservedpages++; |
767 | 762 | ||
768 | set_highmem_pages_init(); | ||
769 | |||
770 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | 763 | codesize = (unsigned long) &_etext - (unsigned long) &_text; |
771 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | 764 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; |
772 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | 765 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index a8a56ce3a962..436a0309db33 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -614,15 +614,6 @@ void __init initmem_init(void) | |||
614 | 614 | ||
615 | void __init paging_init(void) | 615 | void __init paging_init(void) |
616 | { | 616 | { |
617 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | ||
618 | |||
619 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | ||
620 | #ifdef CONFIG_ZONE_DMA | ||
621 | max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; | ||
622 | #endif | ||
623 | max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; | ||
624 | max_zone_pfns[ZONE_NORMAL] = max_pfn; | ||
625 | |||
626 | sparse_memory_present_with_active_regions(MAX_NUMNODES); | 617 | sparse_memory_present_with_active_regions(MAX_NUMNODES); |
627 | sparse_init(); | 618 | sparse_init(); |
628 | 619 | ||
@@ -634,7 +625,7 @@ void __init paging_init(void) | |||
634 | */ | 625 | */ |
635 | node_clear_state(0, N_NORMAL_MEMORY); | 626 | node_clear_state(0, N_NORMAL_MEMORY); |
636 | 627 | ||
637 | free_area_init_nodes(max_zone_pfns); | 628 | zone_sizes_init(); |
638 | } | 629 | } |
639 | 630 | ||
640 | /* | 631 | /* |
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 4b5ba85eb5c9..845df6835f9f 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c | |||
@@ -75,9 +75,9 @@ static unsigned long mmap_rnd(void) | |||
75 | */ | 75 | */ |
76 | if (current->flags & PF_RANDOMIZE) { | 76 | if (current->flags & PF_RANDOMIZE) { |
77 | if (mmap_is_ia32()) | 77 | if (mmap_is_ia32()) |
78 | rnd = (long)get_random_int() % (1<<8); | 78 | rnd = get_random_int() % (1<<8); |
79 | else | 79 | else |
80 | rnd = (long)(get_random_int() % (1<<28)); | 80 | rnd = get_random_int() % (1<<28); |
81 | } | 81 | } |
82 | return rnd << PAGE_SHIFT; | 82 | return rnd << PAGE_SHIFT; |
83 | } | 83 | } |
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 496f494593bf..020cd2e80873 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c | |||
@@ -422,8 +422,9 @@ static int __init numa_alloc_distance(void) | |||
422 | * calls are ignored until the distance table is reset with | 422 | * calls are ignored until the distance table is reset with |
423 | * numa_reset_distance(). | 423 | * numa_reset_distance(). |
424 | * | 424 | * |
425 | * If @from or @to is higher than the highest known node at the time of | 425 | * If @from or @to is higher than the highest known node or lower than zero |
426 | * table creation or @distance doesn't make sense, the call is ignored. | 426 | * at the time of table creation or @distance doesn't make sense, the call |
427 | * is ignored. | ||
427 | * This is to allow simplification of specific NUMA config implementations. | 428 | * This is to allow simplification of specific NUMA config implementations. |
428 | */ | 429 | */ |
429 | void __init numa_set_distance(int from, int to, int distance) | 430 | void __init numa_set_distance(int from, int to, int distance) |
@@ -431,8 +432,9 @@ void __init numa_set_distance(int from, int to, int distance) | |||
431 | if (!numa_distance && numa_alloc_distance() < 0) | 432 | if (!numa_distance && numa_alloc_distance() < 0) |
432 | return; | 433 | return; |
433 | 434 | ||
434 | if (from >= numa_distance_cnt || to >= numa_distance_cnt) { | 435 | if (from >= numa_distance_cnt || to >= numa_distance_cnt || |
435 | printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n", | 436 | from < 0 || to < 0) { |
437 | pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n", | ||
436 | from, to, distance); | 438 | from, to, distance); |
437 | return; | 439 | return; |
438 | } | 440 | } |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index eda2acbb6e81..e1ebde315210 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -1334,12 +1334,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable) | |||
1334 | } | 1334 | } |
1335 | 1335 | ||
1336 | /* | 1336 | /* |
1337 | * If page allocator is not up yet then do not call c_p_a(): | ||
1338 | */ | ||
1339 | if (!debug_pagealloc_enabled) | ||
1340 | return; | ||
1341 | |||
1342 | /* | ||
1343 | * The return value is ignored as the calls cannot fail. | 1337 | * The return value is ignored as the calls cannot fail. |
1344 | * Large pages for identity mappings are not used at boot time | 1338 | * Large pages for identity mappings are not used at boot time |
1345 | * and hence no memory allocations during large page split. | 1339 | * and hence no memory allocations during large page split. |