-rw-r--r--  arch/x86/Kconfig               |  4
-rw-r--r--  arch/x86/kernel/efi.c          |  4
-rw-r--r--  arch/x86/kernel/pci-gart_64.c  |  9
-rw-r--r--  arch/x86/kernel/test_rodata.c  |  2
-rw-r--r--  arch/x86/kernel/traps_64.c     |  4
-rw-r--r--  arch/x86/mm/fault.c            |  2
-rw-r--r--  arch/x86/mm/init_32.c          |  1
-rw-r--r--  arch/x86/mm/init_64.c          |  1
-rw-r--r--  arch/x86/mm/pageattr.c         | 17
-rw-r--r--  include/asm-x86/cacheflush.h   |  7
-rw-r--r--  include/asm-x86/kdebug.h       |  1
-rw-r--r--  mm/memory.c                    |  7
12 files changed, 46 insertions, 13 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index aaed1a3b92d6..3be2305709b7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,6 +21,8 @@ config X86
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
+	select HAVE_KVM
+
 
 config GENERIC_LOCKBREAK
 	def_bool n
@@ -119,8 +121,6 @@ config ARCH_HAS_CPU_RELAX
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool X86_64
 
-select HAVE_KVM
-
 config ARCH_HIBERNATION_POSSIBLE
 	def_bool y
 	depends on !SMP || !X86_VOYAGER
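A note on the two Kconfig hunks above: kconfig attaches option keywords to the most recently opened entry regardless of blank lines or indentation, so the stray "select HAVE_KVM" sitting below HAVE_SETUP_PER_CPU_AREA (def_bool X86_64) effectively enabled KVM support only for 64-bit builds. Moving it into the "config X86" entry applies it to every x86 configuration; the resulting block reads roughly as follows (neighboring lines abbreviated):

    config X86
    	def_bool y
    	...
    	select HAVE_KPROBES
    	select HAVE_KVM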
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index cbdf9bacc575..0c0eeb163d90 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -391,7 +391,7 @@ static void __init runtime_code_page_mkexec(void)
 		if (md->type != EFI_RUNTIME_SERVICES_CODE)
 			continue;
 
-		set_memory_x(md->virt_addr, md->num_pages << EFI_PAGE_SHIFT);
+		set_memory_x(md->virt_addr, md->num_pages);
 	}
 }
 
@@ -434,7 +434,7 @@ void __init efi_enter_virtual_mode(void)
 		}
 
 		if (!(md->attribute & EFI_MEMORY_WB))
-			set_memory_uc(md->virt_addr, size);
+			set_memory_uc(md->virt_addr, md->num_pages);
 
 		systab = (u64) (unsigned long) efi_phys.systab;
 		if (md->phys_addr <= systab && systab < end) {
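The two efi.c hunks fix a units bug: set_memory_x() and set_memory_uc() take a count of pages as their second argument, while the old code passed a byte length (md->num_pages << EFI_PAGE_SHIFT) and a byte size. Since EFI pages are 4K, md->num_pages maps directly onto the x86 page count here. A minimal sketch of the convention, with a hypothetical helper for callers that only have a byte length:

    /* The set_memory_*() helpers operate on whole pages:
     *	int set_memory_x(unsigned long addr, int numpages);
     * changes the range [addr, addr + numpages * PAGE_SIZE).
     *
     * Hypothetical byte-length converter, rounding up:
     */
    static inline int bytes_to_pages(unsigned long len)
    {
    	return (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }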
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 65f6acb025c8..faf3229f8fb3 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -749,6 +749,15 @@ void __init gart_iommu_init(void)
 	 */
 	set_memory_np((unsigned long)__va(iommu_bus_base),
 				iommu_size >> PAGE_SHIFT);
+	/*
+	 * Tricky. The GART table remaps the physical memory range,
+	 * so the CPU wont notice potential aliases and if the memory
+	 * is remapped to UC later on, we might surprise the PCI devices
+	 * with a stray writeout of a cacheline. So play it sure and
+	 * do an explicit, full-scale wbinvd() _after_ having marked all
+	 * the pages as Not-Present:
+	 */
+	wbinvd();
 
 	/*
 	 * Try to workaround a bug (thanks to BenH)
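The ordering in this hunk carries the fix: the direct-mapping pages covering the GART aperture are marked Not-Present first, and only then is the full cache flush issued, so that once wbinvd() completes no dirty cacheline for the aliased range can remain and later be written back underneath a DMA transfer. Distilled to the bare pattern (simplified from the hunk above):

    /* 1) remove the CPU's mapping of the remapped range ... */
    set_memory_np((unsigned long)__va(iommu_bus_base),
    		  iommu_size >> PAGE_SHIFT);
    /* 2) ... then flush all caches, so no stale dirty line survives */
    wbinvd();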
diff --git a/arch/x86/kernel/test_rodata.c b/arch/x86/kernel/test_rodata.c
index 4c163772000e..c29e235792af 100644
--- a/arch/x86/kernel/test_rodata.c
+++ b/arch/x86/kernel/test_rodata.c
@@ -10,8 +10,8 @@
  * of the License.
  */
 #include <linux/module.h>
+#include <asm/cacheflush.h>
 #include <asm/sections.h>
-extern int rodata_test_data;
 
 int rodata_test(void)
 {
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index efc66df728b6..045466681911 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -84,7 +84,7 @@ static inline void conditional_sti(struct pt_regs *regs)
 
 static inline void preempt_conditional_sti(struct pt_regs *regs)
 {
-	preempt_disable();
+	inc_preempt_count();
 	if (regs->flags & X86_EFLAGS_IF)
 		local_irq_enable();
 }
@@ -95,7 +95,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 	local_irq_disable();
 	/* Make sure to not schedule here because we could be running
 	   on an exception stack. */
-	preempt_enable_no_resched();
+	dec_preempt_count();
 }
 
 int kstack_depth_to_print = 12;
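For context on the traps_64.c hunks: these helpers bracket trap handlers that may run on an exception stack, where scheduling is not allowed. The preempt count is raised for the duration, interrupts are re-enabled only if the interrupted context had them on, and the count is dropped again without creating a reschedule point. A hypothetical handler showing the intended pairing (do_example_trap is illustrative, not a real kernel function):

    asmlinkage void do_example_trap(struct pt_regs *regs, long error_code)
    {
    	preempt_conditional_sti(regs);	/* raise preempt count; enable IRQs
    					   only if regs shows they were on */
    	/* ... handle the exception; no scheduling here, since we may
    	   be running on an exception stack ... */
    	preempt_conditional_cli(regs);	/* disable IRQs and drop the count
    					   without a reschedule point */
    }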
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 621afb6343dc..fdc667422df9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -186,7 +186,7 @@ static int bad_address(void *p)
 }
 #endif
 
-void dump_pagetable(unsigned long address)
+static void dump_pagetable(unsigned long address)
 {
 #ifdef CONFIG_X86_32
 	__typeof__(pte_val(__pte(0))) page;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8106bba41ecb..ee1091a46964 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -47,6 +47,7 @@
 #include <asm/sections.h>
 #include <asm/paravirt.h>
 #include <asm/setup.h>
+#include <asm/cacheflush.h>
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b59fc238151f..a4a9cccdd4f2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -45,6 +45,7 @@
 #include <asm/sections.h>
 #include <asm/kdebug.h>
 #include <asm/numa.h>
+#include <asm/cacheflush.h>
 
 const struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bd61ed13f9cf..4119379f80ff 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -688,6 +688,15 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
 		return 0;
 
+	/* Ensure we are PAGE_SIZE aligned */
+	if (addr & ~PAGE_MASK) {
+		addr &= PAGE_MASK;
+		/*
+		 * People should not be passing in unaligned addresses:
+		 */
+		WARN_ON_ONCE(1);
+	}
+
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
@@ -861,8 +870,12 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 		return;
 
 	/*
-	 * The return value is ignored - the calls cannot fail,
-	 * large pages are disabled at boot time:
+	 * The return value is ignored as the calls cannot fail.
+	 * Large pages are kept enabled at boot time, and are
+	 * split up quickly with DEBUG_PAGEALLOC. If a splitup
+	 * fails here (due to temporary memory shortage) no damage
+	 * is done because we just keep the largepage intact up
+	 * to the next attempt when it will likely be split up:
 	 */
 	if (enable)
 		__set_pages_p(page, numpages);
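The alignment check added to change_page_attr_set_clr() relies on standard PAGE_MASK arithmetic: the low bits of an address select the offset within a page, the high bits the page itself. With 4K pages (PAGE_MASK == ~0xfffUL), for example:

    unsigned long addr    = 0x12345678;		/* illustrative value */
    unsigned long offset  = addr & ~PAGE_MASK;	/* 0x678: nonzero => misaligned */
    unsigned long aligned = addr &  PAGE_MASK;	/* 0x12345000: rounded down */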
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index 6a22212b4b20..5396c212d8c0 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -48,12 +48,15 @@ void cpa_init(void);
 
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
+extern const int rodata_test_data;
 #endif
+
 #ifdef CONFIG_DEBUG_RODATA_TEST
-void rodata_test(void);
+int rodata_test(void);
 #else
-static inline void rodata_test(void)
+static inline int rodata_test(void)
 {
+	return 0;
 }
 #endif
 
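With this header change rodata_test() reports a result instead of returning void, and the !CONFIG_DEBUG_RODATA_TEST stub returns 0 so callers need no ifdefs. A hypothetical call site, assuming the usual 0-on-success convention:

    int ret = rodata_test();
    if (ret)
    	printk(KERN_ERR "rodata_test: .rodata test failed\n");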
diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h
index dd442a1632c0..99dcbafa1511 100644
--- a/include/asm-x86/kdebug.h
+++ b/include/asm-x86/kdebug.h
@@ -31,7 +31,6 @@ extern void show_trace(struct task_struct *t, struct pt_regs *regs,
 			unsigned long *sp, unsigned long bp);
 extern void __show_regs(struct pt_regs *regs);
 extern void show_regs(struct pt_regs *regs);
-extern void dump_pagetable(unsigned long);
 extern unsigned long oops_begin(void);
 extern void oops_end(unsigned long, struct pt_regs *, int signr);
 
diff --git a/mm/memory.c b/mm/memory.c
index e7a6dcacefc1..ce3c9e4492d8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2711,6 +2711,13 @@ void print_vma_addr(char *prefix, unsigned long ip)
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 
+	/*
+	 * Do not print if we are in atomic
+	 * contexts (in exception stacks, etc.):
+	 */
+	if (preempt_count())
+		return;
+
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, ip);
 	if (vma && vma->vm_file) {
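The guard added to print_vma_addr() matters because down_read() may sleep, which is illegal in atomic context; a nonzero preempt_count() covers preempt-disabled, softirq and hardirq contexts as well as the exception-stack case the comment mentions. A hypothetical caller that is now safe (the surrounding preempt_disable()/preempt_enable() and the regs variable are illustrative):

    preempt_disable();
    /* In atomic context print_vma_addr() now returns silently instead
     * of sleeping in down_read(&mm->mmap_sem):
     */
    print_vma_addr(" in ", regs->ip);
    preempt_enable();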