Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/fault.c    |  2 +-
-rw-r--r--  arch/x86/mm/init_32.c  |  1 +
-rw-r--r--  arch/x86/mm/init_64.c  |  1 +
-rw-r--r--  arch/x86/mm/pageattr.c | 17 +++++++++++++++--
4 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 621afb6343dc..fdc667422df9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -186,7 +186,7 @@ static int bad_address(void *p)
 }
 #endif
 
-void dump_pagetable(unsigned long address)
+static void dump_pagetable(unsigned long address)
 {
 #ifdef CONFIG_X86_32
 	__typeof__(pte_val(__pte(0))) page;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8106bba41ecb..ee1091a46964 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -47,6 +47,7 @@
 #include <asm/sections.h>
 #include <asm/paravirt.h>
 #include <asm/setup.h>
+#include <asm/cacheflush.h>
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b59fc238151f..a4a9cccdd4f2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -45,6 +45,7 @@
 #include <asm/sections.h>
 #include <asm/kdebug.h>
 #include <asm/numa.h>
+#include <asm/cacheflush.h>
 
 const struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bd61ed13f9cf..4119379f80ff 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -688,6 +688,15 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
 		return 0;
 
+	/* Ensure we are PAGE_SIZE aligned */
+	if (addr & ~PAGE_MASK) {
+		addr &= PAGE_MASK;
+		/*
+		 * People should not be passing in unaligned addresses:
+		 */
+		WARN_ON_ONCE(1);
+	}
+
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
@@ -861,8 +870,12 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 		return;
 
 	/*
-	 * The return value is ignored - the calls cannot fail,
-	 * large pages are disabled at boot time:
+	 * The return value is ignored as the calls cannot fail.
+	 * Large pages are kept enabled at boot time, and are
+	 * split up quickly with DEBUG_PAGEALLOC. If a splitup
+	 * fails here (due to temporary memory shortage) no damage
+	 * is done because we just keep the largepage intact up
+	 * to the next attempt when it will likely be split up:
 	 */
 	if (enable)
 		__set_pages_p(page, numpages);
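
A note on the new alignment check in change_page_attr_set_clr(): `addr & ~PAGE_MASK` is non-zero exactly when any bits below the page boundary are set, and `addr &= PAGE_MASK` rounds the address down to the start of its page before the warning fires. Below is a minimal user-space sketch of the same arithmetic; the PAGE_SIZE value and the warn_once() helper are stand-ins for illustration only (the kernel gets PAGE_MASK from <asm/page.h> and uses WARN_ON_ONCE()).

#include <stdio.h>

/* Illustrative values only; not taken from the kernel headers. */
#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Stand-in for WARN_ON_ONCE(), just for this sketch. */
#define warn_once(msg)	fprintf(stderr, "warning: %s\n", (msg))

static unsigned long align_down_to_page(unsigned long addr)
{
	/* Low bits below the page boundary mean the caller passed
	 * an unaligned address; round it down and complain. */
	if (addr & ~PAGE_MASK) {
		addr &= PAGE_MASK;
		warn_once("unaligned address passed in");
	}
	return addr;
}

int main(void)
{
	unsigned long addr = 0x100234UL;	/* arbitrary example address */

	printf("%#lx -> %#lx\n", addr, align_down_to_page(addr));
	return 0;
}

Rounding down (rather than rejecting the call) matches the diff above: the operation still proceeds on the containing page, and the WARN_ON_ONCE() only flags the sloppy caller once.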