Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig               |  4 ++--
-rw-r--r--  arch/x86/kernel/acpi/cstate.c  |  2 ++
-rw-r--r--  arch/x86/kernel/efi.c          |  4 ++--
-rw-r--r--  arch/x86/kernel/pci-gart_64.c  |  9 +++++++++
-rw-r--r--  arch/x86/kernel/test_rodata.c  |  2 +-
-rw-r--r--  arch/x86/kernel/traps_64.c     |  4 ++--
-rw-r--r--  arch/x86/mm/fault.c            |  2 +-
-rw-r--r--  arch/x86/mm/init_32.c          |  1 +
-rw-r--r--  arch/x86/mm/init_64.c          |  1 +
-rw-r--r--  arch/x86/mm/pageattr.c         | 17 +++++++++++++++--
10 files changed, 36 insertions(+), 10 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index aaed1a3b92d6..3be2305709b7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,6 +21,8 @@ config X86
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
+	select HAVE_KVM
+
 
 config GENERIC_LOCKBREAK
 	def_bool n
@@ -119,8 +121,6 @@ config ARCH_HAS_CPU_RELAX
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool X86_64
 
-	select HAVE_KVM
-
 config ARCH_HIBERNATION_POSSIBLE
 	def_bool y
 	depends on !SMP || !X86_VOYAGER
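(A note on this one: in Kconfig, a select line attaches to the config entry it appears under. Stranded beneath config HAVE_SETUP_PER_CPU_AREA, whose def_bool is X86_64, the select HAVE_KVM only took effect when that symbol did, i.e. on 64-bit builds; it apparently landed there via a mismerge. Hoisting it into the main config X86 entry selects HAVE_KVM on all x86 builds, which is presumably the intent.)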
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 10b67170b133..8ca3557a6d59 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -126,6 +126,8 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 		printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d "
 			"state\n", cx->type);
 	}
+	snprintf(cx->desc, ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
+		 cx->address);
 
 out:
 	set_cpus_allowed(current, saved_mask);
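The added snprintf() fills in cx->desc, previously left empty for FFH-based C-states, so the ACPI processor power interface gets a human-readable description of how the C-state is entered. A stand-alone sketch of the resulting string, assuming a hypothetical MWAIT hint of 0x20 in cx->address:

	#include <stdio.h>

	int main(void)
	{
		char desc[32];               /* stands in for cx->desc */
		unsigned int address = 0x20; /* hypothetical cx->address */

		snprintf(desc, sizeof(desc), "ACPI FFH INTEL MWAIT 0x%x",
			 address);
		printf("%s\n", desc);        /* ACPI FFH INTEL MWAIT 0x20 */
		return 0;
	}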
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index cbdf9bacc575..0c0eeb163d90 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -391,7 +391,7 @@ static void __init runtime_code_page_mkexec(void)
 		if (md->type != EFI_RUNTIME_SERVICES_CODE)
 			continue;
 
-		set_memory_x(md->virt_addr, md->num_pages << EFI_PAGE_SHIFT);
+		set_memory_x(md->virt_addr, md->num_pages);
 	}
 }
 
@@ -434,7 +434,7 @@ void __init efi_enter_virtual_mode(void)
 		}
 
 		if (!(md->attribute & EFI_MEMORY_WB))
-			set_memory_uc(md->virt_addr, size);
+			set_memory_uc(md->virt_addr, md->num_pages);
 
 		systab = (u64) (unsigned long) efi_phys.systab;
 		if (md->phys_addr <= systab && systab < end) {
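Both efi.c hunks fix the same units bug: the second argument of the set_memory_*() helpers is a count of pages, not a byte length, and an EFI memory descriptor already stores its size as a page count in md->num_pages (EFI pages are 4 KB, which matches PAGE_SIZE on x86). A minimal sketch of the convention, with a hypothetical helper name:

	/* Hypothetical: make a page-aligned region of 'numpages' executable. */
	static void __init mark_region_x(unsigned long vaddr, int numpages)
	{
		/* Correct: the helper expects a page count... */
		set_memory_x(vaddr, numpages);

		/*
		 * ...whereas passing a byte length, as the old code did with
		 *	set_memory_x(vaddr, numpages << EFI_PAGE_SHIFT);
		 * changes attributes on a range PAGE_SIZE times too large.
		 */
	}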
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 65f6acb025c8..faf3229f8fb3 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -749,6 +749,15 @@ void __init gart_iommu_init(void)
 	 */
 	set_memory_np((unsigned long)__va(iommu_bus_base),
 				iommu_size >> PAGE_SHIFT);
+	/*
+	 * Tricky. The GART table remaps the physical memory range,
+	 * so the CPU won't notice potential aliases and if the memory
+	 * is remapped to UC later on, we might surprise the PCI devices
+	 * with a stray writeout of a cacheline. So play it safe and
+	 * do an explicit, full-scale wbinvd() _after_ having marked all
+	 * the pages as Not-Present:
+	 */
+	wbinvd();
 
 	/*
 	 * Try to workaround a bug (thanks to BenH)
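The ordering here is the point of the new comment: change the attributes first, flush the caches second. An illustrative sketch of the pattern (assumed names, not the actual GART code):

	/* Illustrative only: hide a physical range from the CPU before a
	 * device starts using it through a remapping such as the GART. */
	static void __init quarantine_range(unsigned long phys, unsigned long size)
	{
		/* 1) Remove the range from the kernel direct mapping... */
		set_memory_np((unsigned long)__va(phys), size >> PAGE_SHIFT);

		/* 2) ...then WBINVD: write back and invalidate all caches,
		 * so no dirty cacheline from the old mapping can be evicted
		 * into the range later. Flushing before unmapping would
		 * leave a window for new cachelines to be created. */
		wbinvd();
	}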
diff --git a/arch/x86/kernel/test_rodata.c b/arch/x86/kernel/test_rodata.c
index 4c163772000e..c29e235792af 100644
--- a/arch/x86/kernel/test_rodata.c
+++ b/arch/x86/kernel/test_rodata.c
@@ -10,8 +10,8 @@
  * of the License.
  */
 #include <linux/module.h>
+#include <asm/cacheflush.h>
 #include <asm/sections.h>
-extern int rodata_test_data;
 
 int rodata_test(void)
 {
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index efc66df728b6..045466681911 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -84,7 +84,7 @@ static inline void conditional_sti(struct pt_regs *regs)
 
 static inline void preempt_conditional_sti(struct pt_regs *regs)
 {
-	preempt_disable();
+	inc_preempt_count();
 	if (regs->flags & X86_EFLAGS_IF)
 		local_irq_enable();
 }
@@ -95,7 +95,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 	local_irq_disable();
 	/* Make sure to not schedule here because we could be running
 	   on an exception stack. */
-	preempt_enable_no_resched();
+	dec_preempt_count();
 }
 
 int kstack_depth_to_print = 12;
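The traps_64.c swap matters most on !CONFIG_PREEMPT kernels, where preempt_disable() and preempt_enable_no_resched() compile away to nothing; the raw inc_preempt_count()/dec_preempt_count() always adjust the count, so the window in which these helpers re-enable interrupts on an exception stack is reliably marked atomic, and no scheduler entry point is involved on the way out. Roughly (paraphrased, not verbatim, from <linux/preempt.h> of this era):

	#ifdef CONFIG_PREEMPT
	# define preempt_disable() \
		do { inc_preempt_count(); barrier(); } while (0)
	#else
	# define preempt_disable()	do { } while (0)   /* no-op */
	#endif

	#define inc_preempt_count()	add_preempt_count(1)   /* unconditional */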
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 621afb6343dc..fdc667422df9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -186,7 +186,7 @@ static int bad_address(void *p)
 }
 #endif
 
-void dump_pagetable(unsigned long address)
+static void dump_pagetable(unsigned long address)
 {
 #ifdef CONFIG_X86_32
 	__typeof__(pte_val(__pte(0))) page;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8106bba41ecb..ee1091a46964 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -47,6 +47,7 @@
 #include <asm/sections.h>
 #include <asm/paravirt.h>
 #include <asm/setup.h>
+#include <asm/cacheflush.h>
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b59fc238151f..a4a9cccdd4f2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -45,6 +45,7 @@
 #include <asm/sections.h>
 #include <asm/kdebug.h>
 #include <asm/numa.h>
+#include <asm/cacheflush.h>
 
 const struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bd61ed13f9cf..4119379f80ff 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -688,6 +688,15 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
 		return 0;
 
+	/* Ensure we are PAGE_SIZE aligned */
+	if (addr & ~PAGE_MASK) {
+		addr &= PAGE_MASK;
+		/*
+		 * People should not be passing in unaligned addresses:
+		 */
+		WARN_ON_ONCE(1);
+	}
+
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
@@ -861,8 +870,12 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 		return;
 
 	/*
-	 * The return value is ignored - the calls cannot fail,
-	 * large pages are disabled at boot time:
+	 * The return value is ignored as the calls cannot fail.
+	 * Large pages are kept enabled at boot time, and are
+	 * split up quickly with DEBUG_PAGEALLOC. If a splitup
+	 * fails here (due to temporary memory shortage) no damage
+	 * is done because we just keep the largepage intact up
+	 * to the next attempt when it will likely be split up:
 	 */
 	if (enable)
 		__set_pages_p(page, numpages);
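Two separate hardenings in pageattr.c: the first hunk makes change_page_attr_set_clr() round an unaligned address down to its page boundary and complain once via WARN_ON_ONCE() instead of silently operating on a misaligned range, while the second documents why a failed large-page split under DEBUG_PAGEALLOC is harmless. A hypothetical caller that would now trip the new warning:

	/* Hypothetical, buggy caller: 'buf' is not page-aligned. The low
	 * bits are now masked off inside change_page_attr_set_clr(), the
	 * whole containing page changes attributes, and the first such
	 * call logs a stack trace through WARN_ON_ONCE(). */
	static int make_buffer_ro(void *buf)
	{
		return set_memory_ro((unsigned long)buf, 1);
	}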