Diffstat (limited to 'arch/x86')
66 files changed, 398 insertions, 210 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 350bee1d54dc..93224b569187 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -28,7 +28,7 @@ config X86
 	select HAVE_KRETPROBES
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
-	select HAVE_FTRACE
+	select HAVE_FUNCTION_TRACER
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
 	select HAVE_ARCH_TRACEHOOK
@@ -231,6 +231,10 @@ config SMP

 	  If you don't know what to do here, say N.

+config X86_HAS_BOOT_CPU_ID
+	def_bool y
+	depends on X86_VOYAGER
+
 config X86_FIND_SMP_CONFIG
 	def_bool y
 	depends on X86_MPPARSE || X86_VOYAGER
@@ -953,7 +957,7 @@ config ARCH_PHYS_ADDR_T_64BIT
 config NUMA
 	bool "Numa Memory Allocation and Scheduler Support (EXPERIMENTAL)"
 	depends on SMP
-	depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
+	depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && BROKEN)
 	default n if X86_PC
 	default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
 	help
@@ -1490,7 +1494,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
 	def_bool X86_64
 	depends on NUMA

-menu "Power management options"
+menu "Power management and ACPI options"
 	depends on !X86_VOYAGER

 config ARCH_HIBERNATION_HEADER
@@ -1890,6 +1894,10 @@ config SYSVIPC_COMPAT
 endmenu


+config HAVE_ATOMIC_IOMAP
+	def_bool y
+	depends on X86_32
+
 source "net/Kconfig"

 source "drivers/Kconfig"
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 0b7c4a3f0651..b815664fe370 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -513,19 +513,19 @@ config CPU_SUP_UMC_32
 	  If unsure, say N.

 config X86_DS
-	bool "Debug Store support"
-	default y
-	help
-	  Add support for Debug Store.
-	  This allows the kernel to provide a memory buffer to the hardware
-	  to store various profiling and tracing events.
+	def_bool X86_PTRACE_BTS
+	depends on X86_DEBUGCTLMSR

 config X86_PTRACE_BTS
-	bool "ptrace interface to Branch Trace Store"
+	bool "Branch Trace Store"
 	default y
-	depends on (X86_DS && X86_DEBUGCTLMSR)
+	depends on X86_DEBUGCTLMSR
 	help
-	  Add a ptrace interface to allow collecting an execution trace
-	  of the traced task.
-	  This collects control flow changes in a (cyclic) buffer and allows
-	  debuggers to fill in the gaps and show an execution trace of the debuggee.
+	  This adds a ptrace interface to the hardware's branch trace store.
+
+	  Debuggers may use it to collect an execution trace of the debugged
+	  application in order to answer the question 'how did I get here?'.
+	  Debuggers may trace user mode as well as kernel mode.
+
+	  Say Y unless there is no application development on this machine
+	  and you want to save a small amount of code size.
diff --git a/arch/x86/boot/compressed/.gitignore b/arch/x86/boot/compressed/.gitignore
index be0ed065249b..63eff3b04d01 100644
--- a/arch/x86/boot/compressed/.gitignore
+++ b/arch/x86/boot/compressed/.gitignore
@@ -1 +1,3 @@
 relocs
+vmlinux.bin.all
+vmlinux.relocs
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index f73e95d75b45..cfdf8c2c5c31 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -91,7 +91,7 @@
 #define X86_FEATURE_11AP	(3*32+19) /* "" Bad local APIC aka 11AP */
 #define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_AMDC1E	(3*32+21) /* AMD C1E detected */
-#define X86_FEATURE_XTOPOLOGY	(3*32+21) /* cpu topology enum extensions */
+#define X86_FEATURE_XTOPOLOGY	(3*32+22) /* cpu topology enum extensions */

 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 4a5397bfce27..7f225a4b2a26 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -255,9 +255,11 @@ static inline unsigned long dma_alloc_coherent_mask(struct device *dev,

 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 {
-#ifdef CONFIG_X86_64
 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

+	if (dma_mask <= DMA_24BIT_MASK)
+		gfp |= GFP_DMA;
+#ifdef CONFIG_X86_64
 	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
 		gfp |= GFP_DMA32;
 #endif
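The hunk above stops hiding the GFP_DMA decision behind CONFIG_X86_64: any device whose coherent mask fits in 24 bits now gets GFP_DMA on 32-bit and 64-bit alike, and only the GFP_DMA32 fallback stays 64-bit-only. A stand-alone sketch of that zone selection, with illustrative mask and flag names rather than the kernel's:

#include <stdint.h>

#define MY_DMA_24BIT_MASK 0x0000000000ffffffULL   /* ISA-style 16 MB limit */
#define MY_DMA_32BIT_MASK 0x00000000ffffffffULL   /* 4 GB limit */

/* stand-ins for the kernel's GFP zone modifiers */
#define MY_GFP_DMA    (1u << 0)
#define MY_GFP_DMA32  (1u << 1)

/* Pick allocation-zone flags from a device's coherent DMA mask. */
static unsigned int zone_flags_for_mask(uint64_t dma_mask, int is_64bit)
{
	unsigned int gfp = 0;

	if (dma_mask <= MY_DMA_24BIT_MASK)
		gfp |= MY_GFP_DMA;        /* must come from the first 16 MB */
	else if (is_64bit && dma_mask <= MY_DMA_32BIT_MASK)
		gfp |= MY_GFP_DMA32;      /* 64-bit kernels also have a 4 GB zone */

	return gfp;
}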
diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h
index 3ffc5a7bf667..398493461913 100644
--- a/arch/x86/include/asm/es7000/wakecpu.h
+++ b/arch/x86/include/asm/es7000/wakecpu.h
@@ -50,10 +50,9 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
 {
 }

-#if APIC_DEBUG
-#define inquire_remote_apic(apicid) __inquire_remote_apic(apicid)
-#else
-#define inquire_remote_apic(apicid) {}
-#endif
+#define inquire_remote_apic(apicid) do {		\
+		if (apic_verbosity >= APIC_DEBUG)	\
+			__inquire_remote_apic(apicid);	\
+	} while (0)

 #endif /* __ASM_MACH_WAKECPU_H */
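This header (and the mach-default copy further down) turns a compile-time #if APIC_DEBUG into a runtime check of apic_verbosity, wrapped in do { ... } while (0) so the macro still behaves as a single statement. A generic sketch of that pattern, with made-up names:

#include <stdio.h>

static int log_verbosity;            /* runtime knob, e.g. from a boot parameter */
#define LOG_LEVEL_DEBUG 2

static void expensive_debug_dump(int id)
{
	printf("dumping state for %d\n", id);
}

/* do/while(0) keeps the macro safe inside un-braced if/else bodies */
#define maybe_debug_dump(id) do {			\
		if (log_verbosity >= LOG_LEVEL_DEBUG)	\
			expensive_debug_dump(id);	\
	} while (0)

int main(void)
{
	log_verbosity = LOG_LEVEL_DEBUG;
	if (1)
		maybe_debug_dump(42);  /* expands to one statement, so the if stays well-formed */
	return 0;
}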
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 8668a94f850e..23696d44a0af 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -9,6 +9,10 @@

 extern int fixmaps_set;

+extern pte_t *kmap_pte;
+extern pgprot_t kmap_prot;
+extern pte_t *pkmap_page_table;
+
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
 void native_set_fixmap(enum fixed_addresses idx,
 		       unsigned long phys, pgprot_t flags);
diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h
index 09f29ab5c139..c7115c1d7217 100644
--- a/arch/x86/include/asm/fixmap_32.h
+++ b/arch/x86/include/asm/fixmap_32.h
@@ -28,10 +28,8 @@ extern unsigned long __FIXADDR_TOP;
 #include <asm/acpi.h>
 #include <asm/apicdef.h>
 #include <asm/page.h>
-#ifdef CONFIG_HIGHMEM
 #include <linux/threads.h>
 #include <asm/kmap_types.h>
-#endif

 /*
  * Here we define all the compile-time 'special' virtual
@@ -75,10 +73,8 @@ enum fixed_addresses {
 #ifdef CONFIG_X86_CYCLONE_TIMER
 	FIX_CYCLONE_TIMER, /*cyclone timer register*/
 #endif
-#ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
 #ifdef CONFIG_PCI_MMCONFIG
 	FIX_PCIE_MCFG,
 #endif
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 47f7e65e6c1d..9e8bc29b8b17 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_X86_FTRACE_H
 #define _ASM_X86_FTRACE_H

-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR		((long)(mcount))
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */

@@ -19,6 +19,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 }
 #endif

-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */

 #endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index a3b3b7c3027b..bf9276bea660 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -25,14 +25,11 @@
 #include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
+#include <asm/fixmap.h>

 /* declarations for highmem.c */
 extern unsigned long highstart_pfn, highend_pfn;

-extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
-extern pte_t *pkmap_page_table;
-
 /*
  * Right now we initialize only a single pte table. It can be extended
  * easily, subsequent pte tables have to be allocated in one physical
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 5618a103f395..ac2abc88cd95 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -82,9 +82,9 @@ extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
 extern void early_ioremap_init(void);
 extern void early_ioremap_clear(void);
 extern void early_ioremap_reset(void);
-extern void *early_ioremap(unsigned long offset, unsigned long size);
-extern void *early_memremap(unsigned long offset, unsigned long size);
-extern void early_iounmap(void *addr, unsigned long size);
+extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
+extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
+extern void early_iounmap(void __iomem *addr, unsigned long size);
 extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);

diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index 98e28ea8cd16..e4a552d44465 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -7,7 +7,6 @@ extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int dmar_disabled;
-extern int forbid_dac;

 extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);

diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index d843ed0e9b2e..0005adb0f941 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -101,30 +101,22 @@
 #define LAST_VM86_IRQ		15
 #define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)

-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
 # if NR_CPUS < MAX_IO_APICS
 #  define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
 # else
 #  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
 # endif

-#elif !defined(CONFIG_X86_VOYAGER)
+#elif defined(CONFIG_X86_VOYAGER)

-# if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS)
-
-#  define NR_IRQS 224
-
-# else /* IO_APIC || PARAVIRT */
-
-#  define NR_IRQS 16
-
-# endif
+# define NR_IRQS 224

-#else /* !VISWS && !VOYAGER */
+#else /* IO_APIC || VOYAGER */

-# define NR_IRQS 224
+# define NR_IRQS 16

-#endif /* VISWS */
+#endif

 /* Voyager specific defines */
 /* These define the CPIs we use in linux */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 65679d006337..8346be87cfa1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -364,6 +364,9 @@ struct kvm_arch{

 	struct page *ept_identity_pagetable;
 	bool ept_identity_pagetable_done;
+
+	unsigned long irq_sources_bitmap;
+	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
 };

 struct kvm_vm_stat {
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h
index d5c0b826a4ff..9d80db91e992 100644
--- a/arch/x86/include/asm/mach-default/mach_wakecpu.h
+++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h
@@ -33,10 +33,9 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
 {
 }

-#if APIC_DEBUG
-#define inquire_remote_apic(apicid) __inquire_remote_apic(apicid)
-#else
-#define inquire_remote_apic(apicid) {}
-#endif
+#define inquire_remote_apic(apicid) do {		\
+		if (apic_verbosity >= APIC_DEBUG)	\
+			__inquire_remote_apic(apicid);	\
+	} while (0)

 #endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 46be2fa7ac26..c2a812ebde89 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -108,9 +108,7 @@ static __always_inline unsigned long long __native_read_tsc(void)
 {
 	DECLARE_ARGS(val, low, high);

-	rdtsc_barrier();
 	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
-	rdtsc_barrier();

 	return EAX_EDX_VAL(val, low, high);
 }
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index fb16cec702e4..52597aeadfff 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -120,13 +120,13 @@ static inline void pud_clear(pud_t *pudp)
 		write_cr3(pgd);
 }

-#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_PFN_MASK))
+#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

 #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))


 /* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) + \
+#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
 				  pmd_index(address))

 #ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 2766021aef80..d12811ce51d9 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -225,5 +225,11 @@ static inline int hard_smp_processor_id(void)

 #endif /* CONFIG_X86_LOCAL_APIC */

+#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
+extern unsigned char boot_cpu_id;
+#else
+#define boot_cpu_id 0
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_SMP_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 90ac7718469a..4850e4b02b61 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -154,7 +154,7 @@ extern unsigned long node_remap_size[];

 #endif

-/* sched_domains SD_NODE_INIT for NUMAQ machines */
+/* sched_domains SD_NODE_INIT for NUMA machines */
 #define SD_NODE_INIT (struct sched_domain) {	\
 	.min_interval = 8,			\
 	.max_interval = 32,			\
@@ -169,8 +169,9 @@ extern unsigned long node_remap_size[];
 	.flags = SD_LOAD_BALANCE		\
 		| SD_BALANCE_EXEC		\
 		| SD_BALANCE_FORK		\
-		| SD_SERIALIZE			\
-		| SD_WAKE_BALANCE,		\
+		| SD_WAKE_AFFINE		\
+		| SD_WAKE_BALANCE		\
+		| SD_SERIALIZE,			\
 	.last_balance = jiffies,		\
 	.balance_interval = 1,			\
 }
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 38ae163cc91b..9cd83a8e40d5 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -34,6 +34,8 @@ static inline cycles_t get_cycles(void)

 static __always_inline cycles_t vget_cycles(void)
 {
+	cycles_t cycles;
+
 	/*
 	 * We only do VDSOs on TSC capable CPUs, so this shouldnt
 	 * access boot_cpu_data (which is not VDSO-safe):
@@ -42,7 +44,11 @@ static __always_inline cycles_t vget_cycles(void)
 	if (!cpu_has_tsc)
 		return 0;
 #endif
-	return (cycles_t)__native_read_tsc();
+	rdtsc_barrier();
+	cycles = (cycles_t)__native_read_tsc();
+	rdtsc_barrier();
+
+	return cycles;
 }

 extern void tsc_init(void);
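Together with the msr.h hunk earlier, this moves the serializing barriers out of __native_read_tsc() and into the vsyscall reader, so only vget_cycles() pays for them. Roughly what the fenced read amounts to, as a user-space sketch (assuming rdtsc_barrier() boils down to an lfence on CPUs that support it; the kernel actually patches in lfence or mfence via alternatives):

#include <stdint.h>

static inline uint64_t rdtsc_plain(void)
{
	uint32_t lo, hi;

	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

/* Fence on both sides so the TSC read cannot be reordered around
 * surrounding loads -- the same idea as rdtsc_barrier() in the patch. */
static inline uint64_t rdtsc_ordered(void)
{
	uint64_t t;

	asm volatile("lfence" ::: "memory");
	t = rdtsc_plain();
	asm volatile("lfence" ::: "memory");
	return t;
}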
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index c6ad93e315c8..7a5782610b2b 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -13,6 +13,7 @@

 #include <linux/numa.h>
 #include <linux/percpu.h>
+#include <linux/timer.h>
 #include <asm/types.h>
 #include <asm/percpu.h>

diff --git a/arch/x86/include/asm/voyager.h b/arch/x86/include/asm/voyager.h
index 9c811d2e6f91..b3e647307625 100644
--- a/arch/x86/include/asm/voyager.h
+++ b/arch/x86/include/asm/voyager.h
@@ -520,6 +520,7 @@ extern void voyager_restart(void);
 extern void voyager_cat_power_off(void);
 extern void voyager_cat_do_common_interrupt(void);
 extern void voyager_handle_nmi(void);
+extern void voyager_smp_intr_init(void);
 /* Commands for the following are */
 #define VOYAGER_PSI_READ	0
 #define VOYAGER_PSI_WRITE	1
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index d7e5a58ee22f..e489ff9cb3e2 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -6,11 +6,12 @@ extra-y := head_$(BITS).o head$(BITS).o head.o init_task.o vmlinu

 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)

-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
+CFLAGS_REMOVE_ftrace.o = -pg
 endif

 #
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index a8fd9ebdc8e2..331b318304eb 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -50,7 +50,7 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
 /* returns !0 if the IOMMU is caching non-present entries in its TLB */
 static int iommu_has_npcache(struct amd_iommu *iommu)
 {
-	return iommu->cap & IOMMU_CAP_NPCACHE;
+	return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
 }

 /****************************************************************************
@@ -536,6 +536,9 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 {
 	address >>= PAGE_SHIFT;
 	iommu_area_free(dom->bitmap, address, pages);
+
+	if (address + pages >= dom->next_bit)
+		dom->need_flush = true;
 }

 /****************************************************************************
@@ -992,8 +995,10 @@ static void __unmap_single(struct amd_iommu *iommu,

 	dma_ops_free_addresses(dma_dom, dma_addr, pages);

-	if (amd_iommu_unmap_flush)
+	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
 		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
+		dma_dom->need_flush = false;
+	}
 }

 /*
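The need_flush bookkeeping added above makes IOTLB flushing lazy: freeing a range only records that the IOTLB may hold stale entries, and the domain is flushed either when unmap-time flushing is forced or when the freed range could be handed out again below the allocator's next_bit. A compressed sketch of the idea with simplified types (not the driver's real structures, and with the free and flush steps folded into one function):

#include <stdbool.h>

struct toy_dma_domain {
	unsigned long next_bit;   /* next search position of the address allocator, in pages */
	bool need_flush;          /* freed range may still be cached in the IOTLB */
};

static bool force_unmap_flush;    /* plays the role of amd_iommu_unmap_flush */

static void toy_flush_domain(struct toy_dma_domain *dom)
{
	/* issue the IOTLB invalidation command for the whole range here */
	dom->need_flush = false;
}

static void toy_free_range(struct toy_dma_domain *dom,
			   unsigned long first_page, unsigned long pages)
{
	/* ...clear the bits in the allocation bitmap... */

	/* The allocator may reuse this range before it wraps around, so
	 * remember that the IOTLB still holds entries for it. */
	if (first_page + pages >= dom->next_bit)
		dom->need_flush = true;

	if (force_unmap_flush || dom->need_flush)
		toy_flush_domain(dom);
}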
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 0d9c993aa93e..ef8f831af823 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_SMP
 	unsigned int eax, ebx, ecx, edx, sub_index;
 	unsigned int ht_mask_width, core_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 25581dcb280e..b9c9ea0217a9 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -20,6 +20,7 @@
 #include <asm/pat.h>
 #include <asm/asm.h>
 #include <asm/numa.h>
+#include <asm/smp.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
@@ -549,6 +550,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 		this_cpu->c_early_init(c);

 	validate_pat_support(c);
+
+#ifdef CONFIG_SMP
+	c->cpu_index = boot_cpu_id;
+#endif
 }

 void __init early_cpu_init(void)
@@ -1134,7 +1139,7 @@ void __cpuinit cpu_init(void)
 	/*
 	 * Boot processor to setup the FP and extended state context info.
 	 */
-	if (!smp_processor_id())
+	if (smp_processor_id() == boot_cpu_id)
 		init_thread_xstate();

 	xsave_init();
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index ce97bf3bed12..7aafeb5263ef 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1290,15 +1290,17 @@ void __init e820_reserve_resources(void)
 		res->start = e820.map[i].addr;
 		res->end = end;

-		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+		res->flags = IORESOURCE_MEM;

 		/*
 		 * don't register the region that could be conflicted with
 		 * pci device BAR resource and insert them later in
 		 * pcibios_resource_survey()
 		 */
-		if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20))
+		if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20)) {
+			res->flags |= IORESOURCE_BUSY;
 			insert_resource(&iomem_resource, res);
+		}
 		res++;
 	}

@@ -1318,7 +1320,7 @@ void __init e820_reserve_resources_late(void)
 	res = e820_res;
 	for (i = 0; i < e820.nr_map; i++) {
 		if (!res->parent && res->end)
-			reserve_region_with_split(&iomem_resource, res->start, res->end, res->name);
+			insert_resource_expand_to_fit(&iomem_resource, res);
 		res++;
 	}
 }
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index dd65143941a8..28b597ef9ca1 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1149,7 +1149,7 @@ ENDPROC(xen_failsafe_callback)

 #endif	/* CONFIG_XEN */

-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE

 ENTRY(mcount)
@@ -1204,7 +1204,7 @@ trace:
 	jmp ftrace_stub
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */

 .section .rodata,"a"
 #include "syscall_table_32.S"
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 09e7145484c5..b86f332c96a6 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -61,7 +61,7 @@

 	.code64

-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(mcount)
 	retq
@@ -138,7 +138,7 @@ trace:
 	jmp ftrace_stub
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */

 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index d073d981a730..50ea0ac8c9bf 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -21,8 +21,7 @@
 #include <asm/nops.h>


-/* Long is fine, even if it is only 4 bytes ;-) */
-static unsigned long *ftrace_nop;
+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

 union ftrace_code_union {
 	char code[MCOUNT_INSN_SIZE];
@@ -33,17 +32,17 @@ union ftrace_code_union {
 };


-static int notrace ftrace_calc_offset(long ip, long addr)
+static int ftrace_calc_offset(long ip, long addr)
 {
 	return (int)(addr - ip);
 }

-notrace unsigned char *ftrace_nop_replace(void)
+unsigned char *ftrace_nop_replace(void)
 {
-	return (char *)ftrace_nop;
+	return ftrace_nop;
 }

-notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
 	static union ftrace_code_union calc;

@@ -57,7 +56,7 @@ notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	return calc.code;
 }

-notrace int
+int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
 {
@@ -66,26 +65,31 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	/*
 	 * Note: Due to modules and __init, code can
 	 * disappear and change, we need to protect against faulting
-	 * as well as code changing.
+	 * as well as code changing. We do this by using the
+	 * probe_kernel_* functions.
 	 *
 	 * No real locking needed, this code is run through
 	 * kstop_machine, or before SMP starts.
 	 */
-	if (__copy_from_user_inatomic(replaced, (char __user *)ip, MCOUNT_INSN_SIZE))
-		return 1;

+	/* read the text we want to modify */
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/* Make sure it is what we expect it to be */
 	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
-		return 2;
+		return -EINVAL;

-	WARN_ON_ONCE(__copy_to_user_inatomic((char __user *)ip, new_code,
-		     MCOUNT_INSN_SIZE));
+	/* replace the text with the new text */
+	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+		return -EPERM;

 	sync_core();

 	return 0;
 }

-notrace int ftrace_update_ftrace_func(ftrace_func_t func)
+int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned long ip = (unsigned long)(&ftrace_call);
 	unsigned char old[MCOUNT_INSN_SIZE], *new;
@@ -98,13 +102,6 @@ notrace int ftrace_update_ftrace_func(ftrace_func_t func)
 	return ret;
 }

-notrace int ftrace_mcount_set(unsigned long *data)
-{
-	/* mcount is initialized as a nop */
-	*data = 0;
-	return 0;
-}
-
 int __init ftrace_dyn_arch_init(void *data)
 {
 	extern const unsigned char ftrace_test_p6nop[];
@@ -127,9 +124,6 @@ int __init ftrace_dyn_arch_init(void *data)
 	 * TODO: check the cpuid to determine the best nop.
 	 */
 	asm volatile (
-		"jmp ftrace_test_jmp\n"
-		/* This code needs to stay around */
-		".section .text, \"ax\"\n"
 		"ftrace_test_jmp:"
 		"jmp ftrace_test_p6nop\n"
 		"nop\n"
@@ -140,8 +134,6 @@ int __init ftrace_dyn_arch_init(void *data)
 		"jmp 1f\n"
 		"ftrace_test_nop5:"
 		".byte 0x66,0x66,0x66,0x66,0x90\n"
-		"jmp 1f\n"
-		".previous\n"
 		"1:"
 		".section .fixup, \"ax\"\n"
 		"2:	movl $1, %0\n"
@@ -156,15 +148,15 @@ int __init ftrace_dyn_arch_init(void *data)
 	switch (faulted) {
 	case 0:
 		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
-		ftrace_nop = (unsigned long *)ftrace_test_p6nop;
+		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
 		break;
 	case 1:
 		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
-		ftrace_nop = (unsigned long *)ftrace_test_nop5;
+		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
 		break;
 	case 2:
 		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
-		ftrace_nop = (unsigned long *)ftrace_test_jmp;
+		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
 		break;
 	}

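The rewritten ftrace_modify_code() validates the old instruction with probe_kernel_read() and patches it with probe_kernel_write(), returning real errnos instead of the old 1/2 codes. A user-space analogue of that verify-then-patch shape (plain memcpy/memcmp standing in for the fault-tolerant kernel helpers):

#include <errno.h>
#include <string.h>

#define INSN_SIZE 5

/*
 * Same shape as the patched ftrace_modify_code(): read, verify, write.
 * In the kernel the reads and writes of the text go through
 * probe_kernel_read()/probe_kernel_write(), so a fault (e.g. the code
 * lived in a module that is gone) yields -EFAULT/-EPERM instead of an oops.
 */
static int patch_insn(unsigned char *ip,
		      const unsigned char *old_code,
		      const unsigned char *new_code)
{
	unsigned char replaced[INSN_SIZE];

	memcpy(replaced, ip, INSN_SIZE);          /* probe_kernel_read() in the kernel */
	if (memcmp(replaced, old_code, INSN_SIZE) != 0)
		return -EINVAL;                   /* not the instruction we expected */

	memcpy(ip, new_code, INSN_SIZE);          /* probe_kernel_write() in the kernel */
	/* the kernel follows this with sync_core() to resynchronize the pipeline */
	return 0;
}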
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 680a06557c5e..2c7dbdb98278 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -15,7 +15,6 @@
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/sched.h>
-#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
 #include <asm/smp.h>
@@ -398,16 +397,16 @@ void __init uv_system_init(void)
 	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());

 	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
-	uv_blade_info = alloc_bootmem_pages(bytes);
+	uv_blade_info = kmalloc(bytes, GFP_KERNEL);

 	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

 	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
-	uv_node_to_blade = alloc_bootmem_pages(bytes);
+	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
 	memset(uv_node_to_blade, 255, bytes);

 	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
-	uv_cpu_to_blade = alloc_bootmem_pages(bytes);
+	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
 	memset(uv_cpu_to_blade, 255, bytes);

 	blade = 0;
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 77017e834cf7..067d8de913f6 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -322,7 +322,7 @@ static int hpet_next_event(unsigned long delta,
 	 * what we wrote hit the chip before we compare it to the
 	 * counter.
 	 */
-	WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt);
+	WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);

 	return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
 }
@@ -445,7 +445,7 @@ static int hpet_setup_irq(struct hpet_dev *dev)
 {

 	if (request_irq(dev->irq, hpet_interrupt_handler,
-			IRQF_SHARED|IRQF_NOBALANCING, dev->name, dev))
+			IRQF_DISABLED|IRQF_NOBALANCING, dev->name, dev))
 		return -1;

 	disable_irq(dev->irq);
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index dd7ebee446af..43cec6bdda63 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -5,7 +5,7 @@
 #include <asm/desc.h>
 #include <asm/ftrace.h>

-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /* mcount is defined in assembly */
 EXPORT_SYMBOL(mcount);
 #endif
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index b764d7429c61..7a3f2028e2eb 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -3611,6 +3611,8 @@ int __init probe_nr_irqs(void)
 	/* something wrong ? */
 	if (nr < nr_min)
 		nr = nr_min;
+	if (WARN_ON(nr > NR_IRQS))
+		nr = NR_IRQS;

 	return nr;
 }
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
index 304d8bad6559..cbc4332a77b2 100644
--- a/arch/x86/kernel/k8.c
+++ b/arch/x86/kernel/k8.c
@@ -18,7 +18,6 @@ static u32 *flush_words;
 struct pci_device_id k8_nb_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
 	{}
 };
 EXPORT_SYMBOL(k8_nb_ids);
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 0732adba05ca..7a385746509a 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -162,7 +162,10 @@ void machine_kexec(struct kimage *image)
 	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
 	page_list[PA_PTE_1] = __pa(kexec_pte1);
 	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
-	page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) << PAGE_SHIFT);
+
+	if (image->type == KEXEC_TYPE_DEFAULT)
+		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
+						<< PAGE_SHIFT);

 	/* The segment registers are funny things, they have both a
 	 * visible and an invisible part.  Whenever the visible part is
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 7a1f8eeac2c7..5f8e5d75a254 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -39,7 +39,7 @@
 #include <asm/microcode.h>

 MODULE_DESCRIPTION("AMD Microcode Update Driver");
-MODULE_AUTHOR("Peter Oruba <peter.oruba@amd.com>");
+MODULE_AUTHOR("Peter Oruba");
 MODULE_LICENSE("GPL v2");

 #define UCODE_MAGIC 0x00414d44
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 936d8d55f230..82fb2809ce32 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -480,8 +480,8 @@ static int __init microcode_init(void)

 	printk(KERN_INFO
 	       "Microcode Update Driver: v" MICROCODE_VERSION
-	       " <tigran@aivazian.fsnet.co.uk>"
-	       " <peter.oruba@amd.com>\n");
+	       " <tigran@aivazian.fsnet.co.uk>,"
+	       " Peter Oruba\n");

 	return 0;
 }
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 1972266e8ba5..192624820217 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -9,6 +9,8 @@
 #include <asm/calgary.h>
 #include <asm/amd_iommu.h>

+static int forbid_dac __read_mostly;
+
 struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);

@@ -291,3 +293,17 @@ void pci_iommu_shutdown(void)
 }
 /* Must execute after PCI subsystem */
 fs_initcall(pci_iommu_init);
+
+#ifdef CONFIG_PCI
+/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
+
+static __devinit void via_no_dac(struct pci_dev *dev)
+{
+	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
+		printk(KERN_INFO "PCI: VIA PCI bridge detected."
+				 "Disabling DAC.\n");
+		forbid_dac = 1;
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
+#endif
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index e3f75bbcedea..a42b02b4df68 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -744,7 +744,7 @@ void __init gart_iommu_init(void)
 	long i;

 	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
-		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
+		printk(KERN_INFO "PCI-GART: No AMD GART found.\n");
 		return;
 	}

diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index c4ce0332759e..3c539d111abb 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -18,9 +18,21 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
 	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }

+static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+					dma_addr_t *dma_handle, gfp_t flags)
+{
+	void *vaddr;
+
+	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
+	if (vaddr)
+		return vaddr;
+
+	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
+}
+
 struct dma_mapping_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
-	.alloc_coherent = swiotlb_alloc_coherent,
+	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
 	.map_single = swiotlb_map_single_phys,
 	.unmap_single = swiotlb_unmap_single,
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f4c93f1cfc19..724adfc63cb9 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -29,11 +29,7 @@ EXPORT_SYMBOL(pm_power_off);

 static const struct desc_ptr no_idt = {};
 static int reboot_mode;
-/*
- * Keyboard reset and triple fault may result in INIT, not RESET, which
- * doesn't work when we're in vmx root mode. Try ACPI first.
- */
-enum reboot_type reboot_type = BOOT_ACPI;
+enum reboot_type reboot_type = BOOT_KBD;
 int reboot_force;

 #if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index e00534b33534..f4049f3513b6 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -154,6 +154,12 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	flush_mm = mm;
 	flush_va = va;
 	cpus_or(flush_cpumask, cpumask, flush_cpumask);
+
+	/*
+	 * Make the above memory operations globally visible before
+	 * sending the IPI.
+	 */
+	smp_mb();
 	/*
 	 * We have to send the IPI only to
 	 * CPUs affected.
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index dcbf7a1159ea..8f919ca69494 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -183,6 +183,11 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

 	/*
+	 * Make the above memory operations globally visible before
+	 * sending the IPI.
+	 */
+	smp_mb();
+	/*
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
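Both TLB-flush paths now publish flush_mm/flush_va and the flush cpumask and then execute smp_mb() before raising the IPI, so the interrupted CPUs are guaranteed to see the flush parameters. A miniature C11 analogue of that ordering requirement (the fence plays the role of smp_mb(); the IPI itself is only a comment):

#include <stdatomic.h>

struct flush_request {
	void *mm;               /* address space to flush */
	unsigned long va;       /* single address, or "flush everything" */
	atomic_int cpumask;     /* stands in for flush_cpumask */
};

static void post_flush(struct flush_request *req, void *mm, unsigned long va)
{
	req->mm = mm;
	req->va = va;
	atomic_fetch_or_explicit(&req->cpumask, 1, memory_order_relaxed);

	/* Full barrier, the smp_mb() from the patch: everything stored above
	 * must be globally visible before the interrupt can be observed. */
	atomic_thread_fence(memory_order_seq_cst);

	/* ...send_IPI_mask(..., INVALIDATE_TLB_VECTOR) would follow here;
	 * the handler on the other CPU then reads mm/va safely. */
}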
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 161bb850fc47..424093b157d3 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -55,7 +55,7 @@ u64 native_sched_clock(void)
 	rdtscll(this_offset);

 	/* return the value in ns */
-	return cycles_2_ns(this_offset);
+	return __cycles_2_ns(this_offset);
 }

 /* We need to define a real function for sched_clock, to override the
@@ -759,7 +759,7 @@ __cpuinit int unsynchronized_tsc(void)
 	if (!cpu_has_tsc || tsc_unstable)
 		return 1;

-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_SMP
 	if (apic_is_clustered_box())
 		return 1;
 #endif
@@ -813,10 +813,6 @@ void __init tsc_init(void)
 	cpu_khz = calibrate_cpu();
 #endif

-	lpj = ((u64)tsc_khz * 1000);
-	do_div(lpj, HZ);
-	lpj_fine = lpj;
-
 	printk("Detected %lu.%03lu MHz processor.\n",
 			(unsigned long)cpu_khz / 1000,
 			(unsigned long)cpu_khz % 1000);
@@ -836,6 +832,10 @@ void __init tsc_init(void)
 	/* now allow native_sched_clock() to use rdtsc */
 	tsc_disabled = 0;

+	lpj = ((u64)tsc_khz * 1000);
+	do_div(lpj, HZ);
+	lpj_fine = lpj;
+
 	use_tsc_delay();
 	/* Check and install the TSC clocksource */
 	dmi_check_system(bad_tsc_dmi_table);
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 7766d36983fc..a688f3bfaec2 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -78,7 +78,7 @@ static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,

 static void __init set_vsmp_pv_ops(void)
 {
-	void *address;
+	void __iomem *address;
 	unsigned int cap, ctl, cfg;

 	/* set vSMP magic bits to indicate vSMP capable kernel */
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index b545f371b5f5..695e426aa354 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -12,7 +12,7 @@
 #include <asm/desc.h>
 #include <asm/ftrace.h>

-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /* mcount is defined in assembly */
 EXPORT_SYMBOL(mcount);
 #endif
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 11c6725fb798..8772dc946823 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c | |||
@@ -545,6 +545,12 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm) | |||
545 | if (!pit) | 545 | if (!pit) |
546 | return NULL; | 546 | return NULL; |
547 | 547 | ||
548 | mutex_lock(&kvm->lock); | ||
549 | pit->irq_source_id = kvm_request_irq_source_id(kvm); | ||
550 | mutex_unlock(&kvm->lock); | ||
551 | if (pit->irq_source_id < 0) | ||
552 | return NULL; | ||
553 | |||
548 | mutex_init(&pit->pit_state.lock); | 554 | mutex_init(&pit->pit_state.lock); |
549 | mutex_lock(&pit->pit_state.lock); | 555 | mutex_lock(&pit->pit_state.lock); |
550 | spin_lock_init(&pit->pit_state.inject_lock); | 556 | spin_lock_init(&pit->pit_state.inject_lock); |
@@ -587,6 +593,7 @@ void kvm_free_pit(struct kvm *kvm) | |||
587 | mutex_lock(&kvm->arch.vpit->pit_state.lock); | 593 | mutex_lock(&kvm->arch.vpit->pit_state.lock); |
588 | timer = &kvm->arch.vpit->pit_state.pit_timer.timer; | 594 | timer = &kvm->arch.vpit->pit_state.pit_timer.timer; |
589 | hrtimer_cancel(timer); | 595 | hrtimer_cancel(timer); |
596 | kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id); | ||
590 | mutex_unlock(&kvm->arch.vpit->pit_state.lock); | 597 | mutex_unlock(&kvm->arch.vpit->pit_state.lock); |
591 | kfree(kvm->arch.vpit); | 598 | kfree(kvm->arch.vpit); |
592 | } | 599 | } |
@@ -595,8 +602,8 @@ void kvm_free_pit(struct kvm *kvm) | |||
595 | static void __inject_pit_timer_intr(struct kvm *kvm) | 602 | static void __inject_pit_timer_intr(struct kvm *kvm) |
596 | { | 603 | { |
597 | mutex_lock(&kvm->lock); | 604 | mutex_lock(&kvm->lock); |
598 | kvm_set_irq(kvm, 0, 1); | 605 | kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1); |
599 | kvm_set_irq(kvm, 0, 0); | 606 | kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0); |
600 | mutex_unlock(&kvm->lock); | 607 | mutex_unlock(&kvm->lock); |
601 | } | 608 | } |
602 | 609 | ||
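The new irq_source_id lets the in-kernel PIT assert IRQ 0 without clobbering assertions made by userspace, which keeps source id 0. The sketch below is only a toy model of that bitmap-based id reservation; the names and the NR_IRQ_SOURCES limit are assumptions, not the KVM helpers used above:

#include <stdio.h>

/* Toy model of handing out interrupt-source ids from a bitmap; bit 0 is
 * reserved for userspace, as kvm_arch_create_vm() does above with
 * KVM_USERSPACE_IRQ_SOURCE_ID.  NR_IRQ_SOURCES is an assumption. */
#define USERSPACE_IRQ_SOURCE_ID	0
#define NR_IRQ_SOURCES		16

static unsigned long irq_sources_bitmap = 1UL << USERSPACE_IRQ_SOURCE_ID;

static int request_irq_source_id(void)
{
	int id;

	for (id = 0; id < NR_IRQ_SOURCES; id++) {
		if (!(irq_sources_bitmap & (1UL << id))) {
			irq_sources_bitmap |= 1UL << id;
			return id;
		}
	}
	return -1;				/* no free source id */
}

static void free_irq_source_id(int id)
{
	irq_sources_bitmap &= ~(1UL << id);
}

int main(void)
{
	int pit_id = request_irq_source_id();	/* the PIT gets its own id */

	printf("PIT irq_source_id = %d\n", pit_id);
	free_irq_source_id(pit_id);		/* as kvm_free_pit() does */
	return 0;
}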
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index e436d4983aa1..4178022b97aa 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h | |||
@@ -44,6 +44,7 @@ struct kvm_pit { | |||
44 | struct kvm_io_device speaker_dev; | 44 | struct kvm_io_device speaker_dev; |
45 | struct kvm *kvm; | 45 | struct kvm *kvm; |
46 | struct kvm_kpit_state pit_state; | 46 | struct kvm_kpit_state pit_state; |
47 | int irq_source_id; | ||
47 | }; | 48 | }; |
48 | 49 | ||
49 | #define KVM_PIT_BASE_ADDRESS 0x40 | 50 | #define KVM_PIT_BASE_ADDRESS 0x40 |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 99c239c5c0ac..2a5e64881d9b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -2634,6 +2634,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu, | |||
2634 | static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu) | 2634 | static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu) |
2635 | { | 2635 | { |
2636 | kvm_x86_ops->tlb_flush(vcpu); | 2636 | kvm_x86_ops->tlb_flush(vcpu); |
2637 | set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests); | ||
2637 | return 1; | 2638 | return 1; |
2638 | } | 2639 | } |
2639 | 2640 | ||
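Setting KVM_REQ_MMU_SYNC here defers the shadow-MMU resync until just before the vcpu re-enters the guest instead of doing it in the hypercall path. A toy model of that request-bit pattern, with all names invented for illustration:

#include <stdio.h>

/* Toy model of a per-vcpu "requests" bitmap: the hypercall path flags
 * work now, and it is performed once right before guest re-entry. */
enum { REQ_MMU_SYNC };

struct vcpu { unsigned long requests; };

static void pv_mmu_flush_tlb(struct vcpu *v)
{
	printf("TLB flushed\n");
	v->requests |= 1UL << REQ_MMU_SYNC;	/* resync before next entry */
}

static void enter_guest(struct vcpu *v)
{
	if (v->requests & (1UL << REQ_MMU_SYNC)) {
		v->requests &= ~(1UL << REQ_MMU_SYNC);
		printf("shadow MMU resynced\n");
	}
	printf("entering guest\n");
}

int main(void)
{
	struct vcpu v = { 0 };

	pv_mmu_flush_tlb(&v);
	enter_guest(&v);
	return 0;
}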
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4f0677d1eae8..f1f8ff2f1fa2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1742,7 +1742,8 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
1742 | goto out; | 1742 | goto out; |
1743 | if (irqchip_in_kernel(kvm)) { | 1743 | if (irqchip_in_kernel(kvm)) { |
1744 | mutex_lock(&kvm->lock); | 1744 | mutex_lock(&kvm->lock); |
1745 | kvm_set_irq(kvm, irq_event.irq, irq_event.level); | 1745 | kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, |
1746 | irq_event.irq, irq_event.level); | ||
1746 | mutex_unlock(&kvm->lock); | 1747 | mutex_unlock(&kvm->lock); |
1747 | r = 0; | 1748 | r = 0; |
1748 | } | 1749 | } |
@@ -4013,6 +4014,9 @@ struct kvm *kvm_arch_create_vm(void) | |||
4013 | INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); | 4014 | INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); |
4014 | INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); | 4015 | INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); |
4015 | 4016 | ||
4017 | /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ | ||
4018 | set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); | ||
4019 | |||
4016 | return kvm; | 4020 | return kvm; |
4017 | } | 4021 | } |
4018 | 4022 | ||
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 48ee4f9435f4..a5d8e1ace1cf 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -367,10 +367,9 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx, | |||
367 | * lazily after a task switch, and Linux uses that gratefully, but wouldn't a | 367 | * lazily after a task switch, and Linux uses that gratefully, but wouldn't a |
368 | * name like "FPUTRAP bit" be a little less cryptic? | 368 | * name like "FPUTRAP bit" be a little less cryptic? |
369 | * | 369 | * |
370 | * We store cr0 (and cr3) locally, because the Host never changes it. The | 370 | * We store cr0 locally because the Host never changes it. The Guest sometimes |
371 | * Guest sometimes wants to read it and we'd prefer not to bother the Host | 371 | * wants to read it and we'd prefer not to bother the Host unnecessarily. */ |
372 | * unnecessarily. */ | 372 | static unsigned long current_cr0; |
373 | static unsigned long current_cr0, current_cr3; | ||
374 | static void lguest_write_cr0(unsigned long val) | 373 | static void lguest_write_cr0(unsigned long val) |
375 | { | 374 | { |
376 | lazy_hcall(LHCALL_TS, val & X86_CR0_TS, 0, 0); | 375 | lazy_hcall(LHCALL_TS, val & X86_CR0_TS, 0, 0); |
@@ -399,17 +398,23 @@ static unsigned long lguest_read_cr2(void) | |||
399 | return lguest_data.cr2; | 398 | return lguest_data.cr2; |
400 | } | 399 | } |
401 | 400 | ||
401 | /* See lguest_set_pte() below. */ | ||
402 | static bool cr3_changed = false; | ||
403 | |||
402 | /* cr3 is the current toplevel pagetable page: the principle is the same as | 404 | /* cr3 is the current toplevel pagetable page: the principle is the same as |
403 | * cr0. Keep a local copy, and tell the Host when it changes. */ | 405 | * cr0. Keep a local copy, and tell the Host when it changes. The only |
406 | * difference is that our local copy is in lguest_data because the Host needs | ||
407 | * to set it upon our initial hypercall. */ | ||
404 | static void lguest_write_cr3(unsigned long cr3) | 408 | static void lguest_write_cr3(unsigned long cr3) |
405 | { | 409 | { |
410 | lguest_data.pgdir = cr3; | ||
406 | lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0); | 411 | lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0); |
407 | current_cr3 = cr3; | 412 | cr3_changed = true; |
408 | } | 413 | } |
409 | 414 | ||
410 | static unsigned long lguest_read_cr3(void) | 415 | static unsigned long lguest_read_cr3(void) |
411 | { | 416 | { |
412 | return current_cr3; | 417 | return lguest_data.pgdir; |
413 | } | 418 | } |
414 | 419 | ||
415 | /* cr4 is used to enable and disable PGE, but we don't care. */ | 420 | /* cr4 is used to enable and disable PGE, but we don't care. */ |
@@ -498,13 +503,13 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) | |||
498 | * to forget all of them. Fortunately, this is very rare. | 503 | * to forget all of them. Fortunately, this is very rare. |
499 | * | 504 | * |
500 | * ... except in early boot when the kernel sets up the initial pagetables, | 505 | * ... except in early boot when the kernel sets up the initial pagetables, |
501 | * which makes booting astonishingly slow. So we don't even tell the Host | 506 | * which makes booting astonishingly slow: 1.83 seconds! So we don't even tell |
502 | * anything changed until we've done the first page table switch. */ | 507 | * the Host anything changed until we've done the first page table switch, |
508 | * which brings boot back to 0.25 seconds. */ | ||
503 | static void lguest_set_pte(pte_t *ptep, pte_t pteval) | 509 | static void lguest_set_pte(pte_t *ptep, pte_t pteval) |
504 | { | 510 | { |
505 | *ptep = pteval; | 511 | *ptep = pteval; |
506 | /* Don't bother with hypercall before initial setup. */ | 512 | if (cr3_changed) |
507 | if (current_cr3) | ||
508 | lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0); | 513 | lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0); |
509 | } | 514 | } |
510 | 515 | ||
@@ -521,7 +526,7 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval) | |||
521 | static void lguest_flush_tlb_single(unsigned long addr) | 526 | static void lguest_flush_tlb_single(unsigned long addr) |
522 | { | 527 | { |
523 | /* Simply set it to zero: if it was not, it will fault back in. */ | 528 | /* Simply set it to zero: if it was not, it will fault back in. */ |
524 | lazy_hcall(LHCALL_SET_PTE, current_cr3, addr, 0); | 529 | lazy_hcall(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0); |
525 | } | 530 | } |
526 | 531 | ||
527 | /* This is what happens after the Guest has removed a large number of entries. | 532 | /* This is what happens after the Guest has removed a large number of entries. |
@@ -581,6 +586,9 @@ static void __init lguest_init_IRQ(void) | |||
581 | 586 | ||
582 | for (i = 0; i < LGUEST_IRQS; i++) { | 587 | for (i = 0; i < LGUEST_IRQS; i++) { |
583 | int vector = FIRST_EXTERNAL_VECTOR + i; | 588 | int vector = FIRST_EXTERNAL_VECTOR + i; |
589 | /* Some systems map "vectors" to interrupts weirdly. Lguest has | ||
590 | * a straightforward 1 to 1 mapping, so force that here. */ | ||
591 | __get_cpu_var(vector_irq)[vector] = i; | ||
584 | if (vector != SYSCALL_VECTOR) { | 592 | if (vector != SYSCALL_VECTOR) { |
585 | set_intr_gate(vector, interrupt[vector]); | 593 | set_intr_gate(vector, interrupt[vector]); |
586 | set_irq_chip_and_handler_name(i, &lguest_irq_controller, | 594 | set_irq_chip_and_handler_name(i, &lguest_irq_controller, |
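The lguest changes replace the private current_cr3 copy with lguest_data.pgdir, so the Host can read it, plus a cr3_changed flag that keeps early-boot pte writes from generating hypercalls. A user-space toy model of that deferral, with printf standing in for the hypercalls:

#include <stdio.h>

/* Toy model of the deferral above: pte writes before the first
 * page-table switch stay local; once cr3 has changed, every update
 * triggers a "hypercall". */
static int cr3_changed;

static void write_cr3(unsigned long pgdir)
{
	printf("hypercall: new toplevel pagetable %#lx\n", pgdir);
	cr3_changed = 1;
}

static void set_pte(unsigned long *ptep, unsigned long pteval)
{
	*ptep = pteval;
	if (cr3_changed)
		printf("hypercall: flush TLB\n");
	/* else: early boot, the Host sees everything at the first switch */
}

int main(void)
{
	unsigned long pte = 0;

	set_pte(&pte, 0x1000);		/* early boot: no hypercall */
	write_cr3(0x2000);		/* first switch: Host now tracks us */
	set_pte(&pte, 0x3000);		/* from now on every update flushes */
	return 0;
}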
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c index 6bbdd633864c..a580b9562e76 100644 --- a/arch/x86/mach-voyager/setup.c +++ b/arch/x86/mach-voyager/setup.c | |||
@@ -27,7 +27,7 @@ static struct irqaction irq2 = { | |||
27 | void __init intr_init_hook(void) | 27 | void __init intr_init_hook(void) |
28 | { | 28 | { |
29 | #ifdef CONFIG_SMP | 29 | #ifdef CONFIG_SMP |
30 | smp_intr_init(); | 30 | voyager_smp_intr_init(); |
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | setup_irq(2, &irq2); | 33 | setup_irq(2, &irq2); |
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 0f6e8a6523ae..0e331652681e 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c | |||
@@ -90,6 +90,7 @@ static void ack_vic_irq(unsigned int irq); | |||
90 | static void vic_enable_cpi(void); | 90 | static void vic_enable_cpi(void); |
91 | static void do_boot_cpu(__u8 cpuid); | 91 | static void do_boot_cpu(__u8 cpuid); |
92 | static void do_quad_bootstrap(void); | 92 | static void do_quad_bootstrap(void); |
93 | static void initialize_secondary(void); | ||
93 | 94 | ||
94 | int hard_smp_processor_id(void); | 95 | int hard_smp_processor_id(void); |
95 | int safe_smp_processor_id(void); | 96 | int safe_smp_processor_id(void); |
@@ -344,6 +345,12 @@ static void do_quad_bootstrap(void) | |||
344 | } | 345 | } |
345 | } | 346 | } |
346 | 347 | ||
348 | void prefill_possible_map(void) | ||
349 | { | ||
350 | /* This is empty on voyager because we need a much | ||
351 | * earlier detection which is done in find_smp_config */ | ||
352 | } | ||
353 | |||
347 | /* Set up all the basic stuff: read the SMP config and make all the | 354 | /* Set up all the basic stuff: read the SMP config and make all the |
348 | * SMP information reflect only the boot cpu. All others will be | 355 | * SMP information reflect only the boot cpu. All others will be |
349 | * brought on-line later. */ | 356 | * brought on-line later. */ |
@@ -413,6 +420,7 @@ void __init smp_store_cpu_info(int id) | |||
413 | struct cpuinfo_x86 *c = &cpu_data(id); | 420 | struct cpuinfo_x86 *c = &cpu_data(id); |
414 | 421 | ||
415 | *c = boot_cpu_data; | 422 | *c = boot_cpu_data; |
423 | c->cpu_index = id; | ||
416 | 424 | ||
417 | identify_secondary_cpu(c); | 425 | identify_secondary_cpu(c); |
418 | } | 426 | } |
@@ -650,6 +658,8 @@ void __init smp_boot_cpus(void) | |||
650 | smp_tune_scheduling(); | 658 | smp_tune_scheduling(); |
651 | */ | 659 | */ |
652 | smp_store_cpu_info(boot_cpu_id); | 660 | smp_store_cpu_info(boot_cpu_id); |
661 | /* setup the jump vector */ | ||
662 | initial_code = (unsigned long)initialize_secondary; | ||
653 | printk("CPU%d: ", boot_cpu_id); | 663 | printk("CPU%d: ", boot_cpu_id); |
654 | print_cpu_info(&cpu_data(boot_cpu_id)); | 664 | print_cpu_info(&cpu_data(boot_cpu_id)); |
655 | 665 | ||
@@ -702,7 +712,7 @@ void __init smp_boot_cpus(void) | |||
702 | 712 | ||
703 | /* Reload the secondary CPUs task structure (this function does not | 713 | /* Reload the secondary CPUs task structure (this function does not |
704 | * return ) */ | 714 | * return ) */ |
705 | void __init initialize_secondary(void) | 715 | static void __init initialize_secondary(void) |
706 | { | 716 | { |
707 | #if 0 | 717 | #if 0 |
708 | // AC kernels only | 718 | // AC kernels only |
@@ -1248,7 +1258,7 @@ static void handle_vic_irq(unsigned int irq, struct irq_desc *desc) | |||
1248 | #define QIC_SET_GATE(cpi, vector) \ | 1258 | #define QIC_SET_GATE(cpi, vector) \ |
1249 | set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector)) | 1259 | set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector)) |
1250 | 1260 | ||
1251 | void __init smp_intr_init(void) | 1261 | void __init voyager_smp_intr_init(void) |
1252 | { | 1262 | { |
1253 | int i; | 1263 | int i; |
1254 | 1264 | ||
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 59f89b434b45..fea4565ff576 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ | 1 | obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ |
2 | pat.o pgtable.o gup.o | 2 | pat.o pgtable.o gup.o |
3 | 3 | ||
4 | obj-$(CONFIG_X86_32) += pgtable_32.o | 4 | obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o |
5 | 5 | ||
6 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 6 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
7 | obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o | 7 | obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o |
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 4ba373c5b8c8..be54176e9eb2 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c | |||
@@ -233,7 +233,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, | |||
233 | len = (unsigned long) nr_pages << PAGE_SHIFT; | 233 | len = (unsigned long) nr_pages << PAGE_SHIFT; |
234 | end = start + len; | 234 | end = start + len; |
235 | if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, | 235 | if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, |
236 | start, len))) | 236 | (void __user *)start, len))) |
237 | goto slow_irqon; | 237 | goto slow_irqon; |
238 | 238 | ||
239 | /* | 239 | /* |
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8396868e82c5..c483f4242079 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -334,7 +334,6 @@ int devmem_is_allowed(unsigned long pagenr) | |||
334 | return 0; | 334 | return 0; |
335 | } | 335 | } |
336 | 336 | ||
337 | #ifdef CONFIG_HIGHMEM | ||
338 | pte_t *kmap_pte; | 337 | pte_t *kmap_pte; |
339 | pgprot_t kmap_prot; | 338 | pgprot_t kmap_prot; |
340 | 339 | ||
@@ -357,6 +356,7 @@ static void __init kmap_init(void) | |||
357 | kmap_prot = PAGE_KERNEL; | 356 | kmap_prot = PAGE_KERNEL; |
358 | } | 357 | } |
359 | 358 | ||
359 | #ifdef CONFIG_HIGHMEM | ||
360 | static void __init permanent_kmaps_init(pgd_t *pgd_base) | 360 | static void __init permanent_kmaps_init(pgd_t *pgd_base) |
361 | { | 361 | { |
362 | unsigned long vaddr; | 362 | unsigned long vaddr; |
@@ -436,7 +436,6 @@ static void __init set_highmem_pages_init(void) | |||
436 | #endif /* !CONFIG_NUMA */ | 436 | #endif /* !CONFIG_NUMA */ |
437 | 437 | ||
438 | #else | 438 | #else |
439 | # define kmap_init() do { } while (0) | ||
440 | # define permanent_kmaps_init(pgd_base) do { } while (0) | 439 | # define permanent_kmaps_init(pgd_base) do { } while (0) |
441 | # define set_highmem_pages_init() do { } while (0) | 440 | # define set_highmem_pages_init() do { } while (0) |
442 | #endif /* CONFIG_HIGHMEM */ | 441 | #endif /* CONFIG_HIGHMEM */ |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index b8e461d49412..9db01db6e3cd 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -350,8 +350,10 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end, | |||
350 | * pagetable pages as RO. So assume someone who pre-setup | 350 | * pagetable pages as RO. So assume someone who pre-setup |
351 | * these mappings are more intelligent. | 351 | * these mappings are more intelligent. |
352 | */ | 352 | */ |
353 | if (pte_val(*pte)) | 353 | if (pte_val(*pte)) { |
354 | pages++; | ||
354 | continue; | 355 | continue; |
356 | } | ||
355 | 357 | ||
356 | if (0) | 358 | if (0) |
357 | printk(" pte=%p addr=%lx pte=%016lx\n", | 359 | printk(" pte=%p addr=%lx pte=%016lx\n", |
@@ -418,8 +420,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, | |||
418 | * not differ with respect to page frame and | 420 | * not differ with respect to page frame and |
419 | * attributes. | 421 | * attributes. |
420 | */ | 422 | */ |
421 | if (page_size_mask & (1 << PG_LEVEL_2M)) | 423 | if (page_size_mask & (1 << PG_LEVEL_2M)) { |
424 | pages++; | ||
422 | continue; | 425 | continue; |
426 | } | ||
423 | new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); | 427 | new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); |
424 | } | 428 | } |
425 | 429 | ||
@@ -499,8 +503,10 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, | |||
499 | * not differ with respect to page frame and | 503 | * not differ with respect to page frame and |
500 | * attributes. | 504 | * attributes. |
501 | */ | 505 | */ |
502 | if (page_size_mask & (1 << PG_LEVEL_1G)) | 506 | if (page_size_mask & (1 << PG_LEVEL_1G)) { |
507 | pages++; | ||
503 | continue; | 508 | continue; |
509 | } | ||
504 | prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud)); | 510 | prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud)); |
505 | } | 511 | } |
506 | 512 | ||
@@ -665,12 +671,13 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, | |||
665 | unsigned long last_map_addr = 0; | 671 | unsigned long last_map_addr = 0; |
666 | unsigned long page_size_mask = 0; | 672 | unsigned long page_size_mask = 0; |
667 | unsigned long start_pfn, end_pfn; | 673 | unsigned long start_pfn, end_pfn; |
674 | unsigned long pos; | ||
668 | 675 | ||
669 | struct map_range mr[NR_RANGE_MR]; | 676 | struct map_range mr[NR_RANGE_MR]; |
670 | int nr_range, i; | 677 | int nr_range, i; |
671 | int use_pse, use_gbpages; | 678 | int use_pse, use_gbpages; |
672 | 679 | ||
673 | printk(KERN_INFO "init_memory_mapping\n"); | 680 | printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end); |
674 | 681 | ||
675 | /* | 682 | /* |
676 | * Find space for the kernel direct mapping tables. | 683 | * Find space for the kernel direct mapping tables. |
@@ -704,35 +711,50 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, | |||
704 | 711 | ||
705 | /* head if not big page alignment ?*/ | 712 | /* head if not big page alignment ?*/ |
706 | start_pfn = start >> PAGE_SHIFT; | 713 | start_pfn = start >> PAGE_SHIFT; |
707 | end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT) | 714 | pos = start_pfn << PAGE_SHIFT; |
715 | end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT) | ||
708 | << (PMD_SHIFT - PAGE_SHIFT); | 716 | << (PMD_SHIFT - PAGE_SHIFT); |
709 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); | 717 | if (start_pfn < end_pfn) { |
718 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); | ||
719 | pos = end_pfn << PAGE_SHIFT; | ||
720 | } | ||
710 | 721 | ||
711 | /* big page (2M) range*/ | 722 | /* big page (2M) range*/ |
712 | start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT) | 723 | start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT) |
713 | << (PMD_SHIFT - PAGE_SHIFT); | 724 | << (PMD_SHIFT - PAGE_SHIFT); |
714 | end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT) | 725 | end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT) |
715 | << (PUD_SHIFT - PAGE_SHIFT); | 726 | << (PUD_SHIFT - PAGE_SHIFT); |
716 | if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT))) | 727 | if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT))) |
717 | end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)); | 728 | end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)); |
718 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, | 729 | if (start_pfn < end_pfn) { |
719 | page_size_mask & (1<<PG_LEVEL_2M)); | 730 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, |
731 | page_size_mask & (1<<PG_LEVEL_2M)); | ||
732 | pos = end_pfn << PAGE_SHIFT; | ||
733 | } | ||
720 | 734 | ||
721 | /* big page (1G) range */ | 735 | /* big page (1G) range */ |
722 | start_pfn = end_pfn; | 736 | start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT) |
723 | end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT); | 737 | << (PUD_SHIFT - PAGE_SHIFT); |
724 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, | 738 | end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT); |
739 | if (start_pfn < end_pfn) { | ||
740 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, | ||
725 | page_size_mask & | 741 | page_size_mask & |
726 | ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G))); | 742 | ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G))); |
743 | pos = end_pfn << PAGE_SHIFT; | ||
744 | } | ||
727 | 745 | ||
728 | /* tail is not big page (1G) alignment */ | 746 | /* tail is not big page (1G) alignment */ |
729 | start_pfn = end_pfn; | 747 | start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT) |
730 | end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT); | 748 | << (PMD_SHIFT - PAGE_SHIFT); |
731 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, | 749 | end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT); |
732 | page_size_mask & (1<<PG_LEVEL_2M)); | 750 | if (start_pfn < end_pfn) { |
751 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, | ||
752 | page_size_mask & (1<<PG_LEVEL_2M)); | ||
753 | pos = end_pfn << PAGE_SHIFT; | ||
754 | } | ||
733 | 755 | ||
734 | /* tail is not big page (2M) alignment */ | 756 | /* tail is not big page (2M) alignment */ |
735 | start_pfn = end_pfn; | 757 | start_pfn = pos>>PAGE_SHIFT; |
736 | end_pfn = end>>PAGE_SHIFT; | 758 | end_pfn = end>>PAGE_SHIFT; |
737 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); | 759 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); |
738 | 760 | ||
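The rewritten init_memory_mapping() advances pos through the request and only records a range when start_pfn < end_pfn, so empty head or tail pieces no longer produce bogus entries. The user-space sketch below reproduces the head/2M/1G/2M-tail/4k-tail splitting arithmetic for an assumed 1 MB - 2 GB range, with the x86-64 shift constants hard-coded:

#include <stdio.h>

/* User-space model of the range splitting above; shifts are the
 * x86-64 constants, the start/end values are assumptions. */
#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PUD_SIZE	(1UL << PUD_SHIFT)

static void save_range(unsigned long s_pfn, unsigned long e_pfn, const char *what)
{
	printf("%-8s pfn %#010lx - %#010lx\n", what, s_pfn, e_pfn);
}

int main(void)
{
	unsigned long start = 0x00100000UL;	/* assumed: 1 MB */
	unsigned long end   = 0x80000000UL;	/* assumed: 2 GB */
	unsigned long pos = start, s, e;

	/* head: 4k pages up to the first 2M boundary */
	s = pos >> PAGE_SHIFT;
	e = ((pos + PMD_SIZE - 1) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (s < e) { save_range(s, e, "head 4k"); pos = e << PAGE_SHIFT; }

	/* 2M pages up to the first 1G boundary, capped at the last 2M boundary */
	s = ((pos + PMD_SIZE - 1) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	e = ((pos + PUD_SIZE - 1) >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (e > ((end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT)))
		e = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (s < e) { save_range(s, e, "2M"); pos = e << PAGE_SHIFT; }

	/* 1G pages */
	s = ((pos + PUD_SIZE - 1) >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	e = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (s < e) { save_range(s, e, "1G"); pos = e << PAGE_SHIFT; }

	/* 2M tail below the last 1G boundary */
	s = ((pos + PMD_SIZE - 1) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	e = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (s < e) { save_range(s, e, "tail 2M"); pos = e << PAGE_SHIFT; }

	/* 4k tail */
	s = pos >> PAGE_SHIFT;
	e = end >> PAGE_SHIFT;
	if (s < e) save_range(s, e, "tail 4k");
	return 0;
}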
@@ -831,12 +853,12 @@ int arch_add_memory(int nid, u64 start, u64 size) | |||
831 | unsigned long nr_pages = size >> PAGE_SHIFT; | 853 | unsigned long nr_pages = size >> PAGE_SHIFT; |
832 | int ret; | 854 | int ret; |
833 | 855 | ||
834 | last_mapped_pfn = init_memory_mapping(start, start + size-1); | 856 | last_mapped_pfn = init_memory_mapping(start, start + size); |
835 | if (last_mapped_pfn > max_pfn_mapped) | 857 | if (last_mapped_pfn > max_pfn_mapped) |
836 | max_pfn_mapped = last_mapped_pfn; | 858 | max_pfn_mapped = last_mapped_pfn; |
837 | 859 | ||
838 | ret = __add_pages(zone, start_pfn, nr_pages); | 860 | ret = __add_pages(zone, start_pfn, nr_pages); |
839 | WARN_ON(1); | 861 | WARN_ON_ONCE(ret); |
840 | 862 | ||
841 | return ret; | 863 | return ret; |
842 | } | 864 | } |
@@ -878,6 +900,7 @@ static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, | |||
878 | void __init mem_init(void) | 900 | void __init mem_init(void) |
879 | { | 901 | { |
880 | long codesize, reservedpages, datasize, initsize; | 902 | long codesize, reservedpages, datasize, initsize; |
903 | unsigned long absent_pages; | ||
881 | 904 | ||
882 | start_periodic_check_for_corruption(); | 905 | start_periodic_check_for_corruption(); |
883 | 906 | ||
@@ -893,8 +916,9 @@ void __init mem_init(void) | |||
893 | #else | 916 | #else |
894 | totalram_pages = free_all_bootmem(); | 917 | totalram_pages = free_all_bootmem(); |
895 | #endif | 918 | #endif |
896 | reservedpages = max_pfn - totalram_pages - | 919 | |
897 | absent_pages_in_range(0, max_pfn); | 920 | absent_pages = absent_pages_in_range(0, max_pfn); |
921 | reservedpages = max_pfn - totalram_pages - absent_pages; | ||
898 | after_bootmem = 1; | 922 | after_bootmem = 1; |
899 | 923 | ||
900 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | 924 | codesize = (unsigned long) &_etext - (unsigned long) &_text; |
@@ -911,10 +935,11 @@ void __init mem_init(void) | |||
911 | VSYSCALL_END - VSYSCALL_START); | 935 | VSYSCALL_END - VSYSCALL_START); |
912 | 936 | ||
913 | printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " | 937 | printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " |
914 | "%ldk reserved, %ldk data, %ldk init)\n", | 938 | "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n", |
915 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | 939 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), |
916 | max_pfn << (PAGE_SHIFT-10), | 940 | max_pfn << (PAGE_SHIFT-10), |
917 | codesize >> 10, | 941 | codesize >> 10, |
942 | absent_pages << (PAGE_SHIFT-10), | ||
918 | reservedpages << (PAGE_SHIFT-10), | 943 | reservedpages << (PAGE_SHIFT-10), |
919 | datasize >> 10, | 944 | datasize >> 10, |
920 | initsize >> 10); | 945 | initsize >> 10); |
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c new file mode 100644 index 000000000000..d0151d8ce452 --- /dev/null +++ b/arch/x86/mm/iomap_32.c | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * Copyright © 2008 Ingo Molnar | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
12 | * General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
17 | */ | ||
18 | |||
19 | #include <asm/iomap.h> | ||
20 | #include <linux/module.h> | ||
21 | |||
22 | /* Map 'pfn' using fixed map 'type' and protections 'prot' | ||
23 | */ | ||
24 | void * | ||
25 | iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) | ||
26 | { | ||
27 | enum fixed_addresses idx; | ||
28 | unsigned long vaddr; | ||
29 | |||
30 | pagefault_disable(); | ||
31 | |||
32 | idx = type + KM_TYPE_NR*smp_processor_id(); | ||
33 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
34 | set_pte(kmap_pte-idx, pfn_pte(pfn, prot)); | ||
35 | arch_flush_lazy_mmu_mode(); | ||
36 | |||
37 | return (void*) vaddr; | ||
38 | } | ||
39 | EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); | ||
40 | |||
41 | void | ||
42 | iounmap_atomic(void *kvaddr, enum km_type type) | ||
43 | { | ||
44 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; | ||
45 | enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); | ||
46 | |||
47 | /* | ||
48 | * Force other mappings to Oops if they'll try to access this pte | ||
49 | * without first remap it. Keeping stale mappings around is a bad idea | ||
50 | * also, in case the page changes cacheability attributes or becomes | ||
51 | * a protected page in a hypervisor. | ||
52 | */ | ||
53 | if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) | ||
54 | kpte_clear_flush(kmap_pte-idx, vaddr); | ||
55 | |||
56 | arch_flush_lazy_mmu_mode(); | ||
57 | pagefault_enable(); | ||
58 | } | ||
59 | EXPORT_SYMBOL_GPL(iounmap_atomic); | ||
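A short sketch of how a caller might pair the two helpers defined above; the pfn argument, the KM_USER0 slot and the PAGE_KERNEL protection are assumptions for illustration, not taken from an existing user:

/* Sketch only: map one page of I/O memory atomically, pull a word out
 * of it, and tear the mapping down again.  The pfn and the KM_USER0
 * slot are assumed by this example. */
static u32 peek_mmio_word(unsigned long pfn)
{
	void *vaddr;
	u32 val;

	vaddr = iomap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL);
	val = *(volatile u32 *)vaddr;		/* atomic context: no sleeping */
	iounmap_atomic(vaddr, KM_USER0);	/* must match the km_type used */

	return val;
}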
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index ae71e11eb3e5..d4c4307ff3e0 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -387,7 +387,7 @@ static void __iomem *ioremap_default(resource_size_t phys_addr, | |||
387 | unsigned long size) | 387 | unsigned long size) |
388 | { | 388 | { |
389 | unsigned long flags; | 389 | unsigned long flags; |
390 | void *ret; | 390 | void __iomem *ret; |
391 | int err; | 391 | int err; |
392 | 392 | ||
393 | /* | 393 | /* |
@@ -399,11 +399,11 @@ static void __iomem *ioremap_default(resource_size_t phys_addr, | |||
399 | if (err < 0) | 399 | if (err < 0) |
400 | return NULL; | 400 | return NULL; |
401 | 401 | ||
402 | ret = (void *) __ioremap_caller(phys_addr, size, flags, | 402 | ret = __ioremap_caller(phys_addr, size, flags, |
403 | __builtin_return_address(0)); | 403 | __builtin_return_address(0)); |
404 | 404 | ||
405 | free_memtype(phys_addr, phys_addr + size); | 405 | free_memtype(phys_addr, phys_addr + size); |
406 | return (void __iomem *)ret; | 406 | return ret; |
407 | } | 407 | } |
408 | 408 | ||
409 | void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, | 409 | void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, |
@@ -622,7 +622,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx) | |||
622 | __early_set_fixmap(idx, 0, __pgprot(0)); | 622 | __early_set_fixmap(idx, 0, __pgprot(0)); |
623 | } | 623 | } |
624 | 624 | ||
625 | static void *prev_map[FIX_BTMAPS_SLOTS] __initdata; | 625 | static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata; |
626 | static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata; | 626 | static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata; |
627 | static int __init check_early_ioremap_leak(void) | 627 | static int __init check_early_ioremap_leak(void) |
628 | { | 628 | { |
@@ -645,7 +645,7 @@ static int __init check_early_ioremap_leak(void) | |||
645 | } | 645 | } |
646 | late_initcall(check_early_ioremap_leak); | 646 | late_initcall(check_early_ioremap_leak); |
647 | 647 | ||
648 | static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot) | 648 | static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot) |
649 | { | 649 | { |
650 | unsigned long offset, last_addr; | 650 | unsigned long offset, last_addr; |
651 | unsigned int nrpages; | 651 | unsigned int nrpages; |
@@ -713,23 +713,23 @@ static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size, | |||
713 | if (early_ioremap_debug) | 713 | if (early_ioremap_debug) |
714 | printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0)); | 714 | printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0)); |
715 | 715 | ||
716 | prev_map[slot] = (void *) (offset + fix_to_virt(idx0)); | 716 | prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0)); |
717 | return prev_map[slot]; | 717 | return prev_map[slot]; |
718 | } | 718 | } |
719 | 719 | ||
720 | /* Remap an IO device */ | 720 | /* Remap an IO device */ |
721 | void __init *early_ioremap(unsigned long phys_addr, unsigned long size) | 721 | void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size) |
722 | { | 722 | { |
723 | return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO); | 723 | return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO); |
724 | } | 724 | } |
725 | 725 | ||
726 | /* Remap memory */ | 726 | /* Remap memory */ |
727 | void __init *early_memremap(unsigned long phys_addr, unsigned long size) | 727 | void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size) |
728 | { | 728 | { |
729 | return __early_ioremap(phys_addr, size, PAGE_KERNEL); | 729 | return __early_ioremap(phys_addr, size, PAGE_KERNEL); |
730 | } | 730 | } |
731 | 731 | ||
732 | void __init early_iounmap(void *addr, unsigned long size) | 732 | void __init early_iounmap(void __iomem *addr, unsigned long size) |
733 | { | 733 | { |
734 | unsigned long virt_addr; | 734 | unsigned long virt_addr; |
735 | unsigned long offset; | 735 | unsigned long offset; |
@@ -779,7 +779,7 @@ void __init early_iounmap(void *addr, unsigned long size) | |||
779 | --idx; | 779 | --idx; |
780 | --nrpages; | 780 | --nrpages; |
781 | } | 781 | } |
782 | prev_map[slot] = 0; | 782 | prev_map[slot] = NULL; |
783 | } | 783 | } |
784 | 784 | ||
785 | void __this_fixmap_does_not_exist(void) | 785 | void __this_fixmap_does_not_exist(void) |
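The __iomem annotations added above only matter to sparse: they mark pointers that must go through the MMIO accessors. A minimal example of the convention being enforced; the device, register offset and size are invented:

/* Sketch: an __iomem pointer is only dereferenced through the MMIO
 * accessors, which is exactly what sparse checks for. */
static int probe_mmio_device(resource_size_t phys, unsigned long size)
{
	void __iomem *regs;
	u32 id;

	regs = ioremap(phys, size);
	if (!regs)
		return -ENOMEM;

	id = readl(regs + 0x00);	/* hypothetical ID register at offset 0 */
	iounmap(regs);

	return id ? 0 : -ENODEV;
}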
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index f1dc1b75d166..e89d24815f26 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -67,18 +67,18 @@ static void split_page_count(int level) | |||
67 | 67 | ||
68 | void arch_report_meminfo(struct seq_file *m) | 68 | void arch_report_meminfo(struct seq_file *m) |
69 | { | 69 | { |
70 | seq_printf(m, "DirectMap4k: %8lu kB\n", | 70 | seq_printf(m, "DirectMap4k: %8lu kB\n", |
71 | direct_pages_count[PG_LEVEL_4K] << 2); | 71 | direct_pages_count[PG_LEVEL_4K] << 2); |
72 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) | 72 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) |
73 | seq_printf(m, "DirectMap2M: %8lu kB\n", | 73 | seq_printf(m, "DirectMap2M: %8lu kB\n", |
74 | direct_pages_count[PG_LEVEL_2M] << 11); | 74 | direct_pages_count[PG_LEVEL_2M] << 11); |
75 | #else | 75 | #else |
76 | seq_printf(m, "DirectMap4M: %8lu kB\n", | 76 | seq_printf(m, "DirectMap4M: %8lu kB\n", |
77 | direct_pages_count[PG_LEVEL_2M] << 12); | 77 | direct_pages_count[PG_LEVEL_2M] << 12); |
78 | #endif | 78 | #endif |
79 | #ifdef CONFIG_X86_64 | 79 | #ifdef CONFIG_X86_64 |
80 | if (direct_gbpages) | 80 | if (direct_gbpages) |
81 | seq_printf(m, "DirectMap1G: %8lu kB\n", | 81 | seq_printf(m, "DirectMap1G: %8lu kB\n", |
82 | direct_pages_count[PG_LEVEL_1G] << 20); | 82 | direct_pages_count[PG_LEVEL_1G] << 20); |
83 | #endif | 83 | #endif |
84 | } | 84 | } |
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 738fd0f24958..eb1bf000d12e 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -481,12 +481,16 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) | |||
481 | return 1; | 481 | return 1; |
482 | } | 482 | } |
483 | #else | 483 | #else |
484 | /* This check is needed to avoid cache aliasing when PAT is enabled */ | ||
484 | static inline int range_is_allowed(unsigned long pfn, unsigned long size) | 485 | static inline int range_is_allowed(unsigned long pfn, unsigned long size) |
485 | { | 486 | { |
486 | u64 from = ((u64)pfn) << PAGE_SHIFT; | 487 | u64 from = ((u64)pfn) << PAGE_SHIFT; |
487 | u64 to = from + size; | 488 | u64 to = from + size; |
488 | u64 cursor = from; | 489 | u64 cursor = from; |
489 | 490 | ||
491 | if (!pat_enabled) | ||
492 | return 1; | ||
493 | |||
490 | while (cursor < to) { | 494 | while (cursor < to) { |
491 | if (!devmem_is_allowed(pfn)) { | 495 | if (!devmem_is_allowed(pfn)) { |
492 | printk(KERN_INFO | 496 | printk(KERN_INFO |
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 0620d6d45f7d..3f1b81a83e2e 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c | |||
@@ -27,8 +27,7 @@ static int num_counters = 2; | |||
27 | static int counter_width = 32; | 27 | static int counter_width = 32; |
28 | 28 | ||
29 | #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) | 29 | #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) |
30 | #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) | 30 | #define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1)))) |
31 | #define CTR_OVERFLOWED(n) (!((n) & (1U<<(counter_width-1)))) | ||
32 | 31 | ||
33 | #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) | 32 | #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) |
34 | #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) | 33 | #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) |
@@ -124,14 +123,14 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs) | |||
124 | static int ppro_check_ctrs(struct pt_regs * const regs, | 123 | static int ppro_check_ctrs(struct pt_regs * const regs, |
125 | struct op_msrs const * const msrs) | 124 | struct op_msrs const * const msrs) |
126 | { | 125 | { |
127 | unsigned int low, high; | 126 | u64 val; |
128 | int i; | 127 | int i; |
129 | 128 | ||
130 | for (i = 0 ; i < num_counters; ++i) { | 129 | for (i = 0 ; i < num_counters; ++i) { |
131 | if (!reset_value[i]) | 130 | if (!reset_value[i]) |
132 | continue; | 131 | continue; |
133 | CTR_READ(low, high, msrs, i); | 132 | rdmsrl(msrs->counters[i].addr, val); |
134 | if (CTR_OVERFLOWED(low)) { | 133 | if (CTR_OVERFLOWED(val)) { |
135 | oprofile_add_sample(regs, i); | 134 | oprofile_add_sample(regs, i); |
136 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); | 135 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); |
137 | } | 136 | } |
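Reading the counter with rdmsrl and testing a 64-bit mask matters once counter_width exceeds 32 bits, where a 1U shift would be undefined. A user-space illustration with an assumed 40-bit counter:

#include <stdio.h>
#include <stdint.h>

/* The counter has overflowed when bit (width-1) is clear, i.e. when the
 * value armed as -reset_value has wrapped past zero. */
static int ctr_overflowed(uint64_t n, int counter_width)
{
	return !(n & (1ULL << (counter_width - 1)));
}

int main(void)
{
	int counter_width = 40;			/* assumed counter width */
	uint64_t armed   = (1ULL << 40) - 100;	/* -reset_value in 40 bits */
	uint64_t wrapped = 5;			/* value after 105 events */

	printf("armed:   overflowed=%d\n", ctr_overflowed(armed, counter_width));
	printf("wrapped: overflowed=%d\n", ctr_overflowed(wrapped, counter_width));
	/* a 32-bit mask like 1U << (counter_width - 1) would be undefined here */
	return 0;
}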
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 313947940a1a..6dcefba7836f 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | ifdef CONFIG_FTRACE | 1 | ifdef CONFIG_FUNCTION_TRACER |
2 | # Do not profile debug and lowlevel utilities | 2 | # Do not profile debug and lowlevel utilities |
3 | CFLAGS_REMOVE_spinlock.o = -pg | 3 | CFLAGS_REMOVE_spinlock.o = -pg |
4 | CFLAGS_REMOVE_time.o = -pg | 4 | CFLAGS_REMOVE_time.o = -pg |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index b61534c7a4c4..5e4686d70f62 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -863,15 +863,16 @@ static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned l | |||
863 | if (PagePinned(virt_to_page(mm->pgd))) { | 863 | if (PagePinned(virt_to_page(mm->pgd))) { |
864 | SetPagePinned(page); | 864 | SetPagePinned(page); |
865 | 865 | ||
866 | vm_unmap_aliases(); | ||
866 | if (!PageHighMem(page)) { | 867 | if (!PageHighMem(page)) { |
867 | make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn))); | 868 | make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn))); |
868 | if (level == PT_PTE && USE_SPLIT_PTLOCKS) | 869 | if (level == PT_PTE && USE_SPLIT_PTLOCKS) |
869 | pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); | 870 | pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); |
870 | } else | 871 | } else { |
871 | /* make sure there are no stray mappings of | 872 | /* make sure there are no stray mappings of |
872 | this page */ | 873 | this page */ |
873 | kmap_flush_unused(); | 874 | kmap_flush_unused(); |
874 | vm_unmap_aliases(); | 875 | } |
875 | } | 876 | } |
876 | } | 877 | } |
877 | 878 | ||
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index d4d52f5a1cf7..688936044dc9 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -246,11 +246,21 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr) | |||
246 | { | 246 | { |
247 | unsigned long address = (unsigned long)vaddr; | 247 | unsigned long address = (unsigned long)vaddr; |
248 | unsigned int level; | 248 | unsigned int level; |
249 | pte_t *pte = lookup_address(address, &level); | 249 | pte_t *pte; |
250 | unsigned offset = address & ~PAGE_MASK; | 250 | unsigned offset; |
251 | 251 | ||
252 | BUG_ON(pte == NULL); | 252 | /* |
253 | * if the PFN is in the linear mapped vaddr range, we can just use | ||
254 | * the (quick) virt_to_machine() p2m lookup | ||
255 | */ | ||
256 | if (virt_addr_valid(vaddr)) | ||
257 | return virt_to_machine(vaddr); | ||
253 | 258 | ||
259 | /* otherwise we have to do a (slower) full page-table walk */ | ||
260 | |||
261 | pte = lookup_address(address, &level); | ||
262 | BUG_ON(pte == NULL); | ||
263 | offset = address & ~PAGE_MASK; | ||
254 | return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset); | 264 | return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset); |
255 | } | 265 | } |
256 | 266 | ||
@@ -410,7 +420,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, | |||
410 | 420 | ||
411 | xen_mc_batch(); | 421 | xen_mc_batch(); |
412 | 422 | ||
413 | u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; | 423 | u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; |
414 | u.val = pte_val_ma(pte); | 424 | u.val = pte_val_ma(pte); |
415 | xen_extend_mmu_update(&u); | 425 | xen_extend_mmu_update(&u); |
416 | 426 | ||
@@ -840,13 +850,16 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page, | |||
840 | read-only, and can be pinned. */ | 850 | read-only, and can be pinned. */ |
841 | static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) | 851 | static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) |
842 | { | 852 | { |
853 | vm_unmap_aliases(); | ||
854 | |||
843 | xen_mc_batch(); | 855 | xen_mc_batch(); |
844 | 856 | ||
845 | if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) { | 857 | if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) { |
846 | /* re-enable interrupts for kmap_flush_unused */ | 858 | /* re-enable interrupts for flushing */ |
847 | xen_mc_issue(0); | 859 | xen_mc_issue(0); |
860 | |||
848 | kmap_flush_unused(); | 861 | kmap_flush_unused(); |
849 | vm_unmap_aliases(); | 862 | |
850 | xen_mc_batch(); | 863 | xen_mc_batch(); |
851 | } | 864 | } |
852 | 865 | ||
@@ -864,7 +877,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) | |||
864 | #else /* CONFIG_X86_32 */ | 877 | #else /* CONFIG_X86_32 */ |
865 | #ifdef CONFIG_X86_PAE | 878 | #ifdef CONFIG_X86_PAE |
866 | /* Need to make sure unshared kernel PMD is pinnable */ | 879 | /* Need to make sure unshared kernel PMD is pinnable */ |
867 | xen_pin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), | 880 | xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]), |
868 | PT_PMD); | 881 | PT_PMD); |
869 | #endif | 882 | #endif |
870 | xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd))); | 883 | xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd))); |
@@ -981,7 +994,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) | |||
981 | 994 | ||
982 | #ifdef CONFIG_X86_PAE | 995 | #ifdef CONFIG_X86_PAE |
983 | /* Need to make sure unshared kernel PMD is unpinned */ | 996 | /* Need to make sure unshared kernel PMD is unpinned */ |
984 | xen_unpin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), | 997 | xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]), |
985 | PT_PMD); | 998 | PT_PMD); |
986 | #endif | 999 | #endif |
987 | 1000 | ||