author     Paul Mundt <lethal@linux-sh.org>   2008-10-31 03:18:02 -0400
committer  Paul Mundt <lethal@linux-sh.org>   2008-10-31 03:18:02 -0400
commit     0803d540db06f53acd2fabf1347d5b665218f14a (patch)
tree       f5f8e5510a76bb59d3583478961daf5afc32df08 /arch/x86
parent     a8884e3415c29c58a5875d54c109c4a7fcaa6fb4 (diff)
parent     721d5dfe7e516954c501d5e9d0dfab379cf4241a (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/x86')
43 files changed, 252 insertions, 141 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 350bee1d54dc..6f20718d3156 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -28,7 +28,7 @@ config X86 | |||
28 | select HAVE_KRETPROBES | 28 | select HAVE_KRETPROBES |
29 | select HAVE_FTRACE_MCOUNT_RECORD | 29 | select HAVE_FTRACE_MCOUNT_RECORD |
30 | select HAVE_DYNAMIC_FTRACE | 30 | select HAVE_DYNAMIC_FTRACE |
31 | select HAVE_FTRACE | 31 | select HAVE_FUNCTION_TRACER |
32 | select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) | 32 | select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) |
33 | select HAVE_ARCH_KGDB if !X86_VOYAGER | 33 | select HAVE_ARCH_KGDB if !X86_VOYAGER |
34 | select HAVE_ARCH_TRACEHOOK | 34 | select HAVE_ARCH_TRACEHOOK |
@@ -231,6 +231,10 @@ config SMP | |||
231 | 231 | ||
232 | If you don't know what to do here, say N. | 232 | If you don't know what to do here, say N. |
233 | 233 | ||
234 | config X86_HAS_BOOT_CPU_ID | ||
235 | def_bool y | ||
236 | depends on X86_VOYAGER | ||
237 | |||
234 | config X86_FIND_SMP_CONFIG | 238 | config X86_FIND_SMP_CONFIG |
235 | def_bool y | 239 | def_bool y |
236 | depends on X86_MPPARSE || X86_VOYAGER | 240 | depends on X86_MPPARSE || X86_VOYAGER |
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 0b7c4a3f0651..b815664fe370 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -513,19 +513,19 @@ config CPU_SUP_UMC_32 | |||
513 | If unsure, say N. | 513 | If unsure, say N. |
514 | 514 | ||
515 | config X86_DS | 515 | config X86_DS |
516 | bool "Debug Store support" | 516 | def_bool X86_PTRACE_BTS |
517 | default y | 517 | depends on X86_DEBUGCTLMSR |
518 | help | ||
519 | Add support for Debug Store. | ||
520 | This allows the kernel to provide a memory buffer to the hardware | ||
521 | to store various profiling and tracing events. | ||
522 | 518 | ||
523 | config X86_PTRACE_BTS | 519 | config X86_PTRACE_BTS |
524 | bool "ptrace interface to Branch Trace Store" | 520 | bool "Branch Trace Store" |
525 | default y | 521 | default y |
526 | depends on (X86_DS && X86_DEBUGCTLMSR) | 522 | depends on X86_DEBUGCTLMSR |
527 | help | 523 | help |
528 | Add a ptrace interface to allow collecting an execution trace | 524 | This adds a ptrace interface to the hardware's branch trace store. |
529 | of the traced task. | 525 | |
530 | This collects control flow changes in a (cyclic) buffer and allows | 526 | Debuggers may use it to collect an execution trace of the debugged |
531 | debuggers to fill in the gaps and show an execution trace of the debuggee. | 527 | application in order to answer the question 'how did I get here?'. |
528 | Debuggers may trace user mode as well as kernel mode. | ||
529 | |||
530 | Say Y unless there is no application development on this machine | ||
531 | and you want to save a small amount of code size. | ||
diff --git a/arch/x86/boot/compressed/.gitignore b/arch/x86/boot/compressed/.gitignore
index be0ed065249b..63eff3b04d01 100644
--- a/arch/x86/boot/compressed/.gitignore
+++ b/arch/x86/boot/compressed/.gitignore
@@ -1 +1,3 @@ | |||
1 | relocs | 1 | relocs |
2 | vmlinux.bin.all | ||
3 | vmlinux.relocs | ||
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 4a5397bfce27..7f225a4b2a26 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -255,9 +255,11 @@ static inline unsigned long dma_alloc_coherent_mask(struct device *dev, | |||
255 | 255 | ||
256 | static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) | 256 | static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) |
257 | { | 257 | { |
258 | #ifdef CONFIG_X86_64 | ||
259 | unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp); | 258 | unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp); |
260 | 259 | ||
260 | if (dma_mask <= DMA_24BIT_MASK) | ||
261 | gfp |= GFP_DMA; | ||
262 | #ifdef CONFIG_X86_64 | ||
261 | if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) | 263 | if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) |
262 | gfp |= GFP_DMA32; | 264 | gfp |= GFP_DMA32; |
263 | #endif | 265 | #endif |
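
The hunk above moves the coherent-mask lookup out of the CONFIG_X86_64 guard and adds a GFP_DMA fallback for devices whose coherent mask fits in 24 bits, so ISA-style devices get zone-DMA memory on 32-bit as well. A minimal sketch of the resulting flag selection (the SKETCH_* names and constant values are assumptions for illustration, not the kernel's definitions):

	/* Illustrative only -- not the kernel's exact code. */
	#define SKETCH_GFP_DMA    0x01u   /* allocate from the 16 MB DMA zone */
	#define SKETCH_GFP_DMA32  0x04u   /* allocate below 4 GB (64-bit only) */

	static unsigned int sketch_coherent_gfp(unsigned long long coherent_mask,
						unsigned int gfp, int is_x86_64)
	{
		if (coherent_mask <= 0xffffffULL)		/* DMA_24BIT_MASK */
			gfp |= SKETCH_GFP_DMA;
		if (is_x86_64 && coherent_mask <= 0xffffffffULL	/* DMA_32BIT_MASK */
		    && !(gfp & SKETCH_GFP_DMA))
			gfp |= SKETCH_GFP_DMA32;
		return gfp;
	}
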
diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h
index 3ffc5a7bf667..398493461913 100644
--- a/arch/x86/include/asm/es7000/wakecpu.h
+++ b/arch/x86/include/asm/es7000/wakecpu.h
@@ -50,10 +50,9 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) | |||
50 | { | 50 | { |
51 | } | 51 | } |
52 | 52 | ||
53 | #if APIC_DEBUG | 53 | #define inquire_remote_apic(apicid) do { \ |
54 | #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid) | 54 | if (apic_verbosity >= APIC_DEBUG) \ |
55 | #else | 55 | __inquire_remote_apic(apicid); \ |
56 | #define inquire_remote_apic(apicid) {} | 56 | } while (0) |
57 | #endif | ||
58 | 57 | ||
59 | #endif /* __ASM_MACH_WAKECPU_H */ | 58 | #endif /* __ASM_MACH_WAKECPU_H */ |
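
Both this copy and the mach-default one further down drop the compile-time "#if APIC_DEBUG" in favour of a runtime apic_verbosity check, wrapping the body in do { ... } while (0) so the macro still expands to a single statement. A small sketch of the hazard that wrapper avoids (the caller and report_boot_status() are hypothetical, for illustration only):

	/* Without the do/while(0) wrapper, the 'else' below would either
	 * dangle or bind to the 'if' inside the macro expansion. */
	if (boot_error)
		inquire_remote_apic(apicid);
	else
		report_boot_status(apicid);	/* hypothetical helper */
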
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 47f7e65e6c1d..9e8bc29b8b17 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -1,7 +1,7 @@ | |||
1 | #ifndef _ASM_X86_FTRACE_H | 1 | #ifndef _ASM_X86_FTRACE_H |
2 | #define _ASM_X86_FTRACE_H | 2 | #define _ASM_X86_FTRACE_H |
3 | 3 | ||
4 | #ifdef CONFIG_FTRACE | 4 | #ifdef CONFIG_FUNCTION_TRACER |
5 | #define MCOUNT_ADDR ((long)(mcount)) | 5 | #define MCOUNT_ADDR ((long)(mcount)) |
6 | #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ | 6 | #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ |
7 | 7 | ||
@@ -19,6 +19,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) | |||
19 | } | 19 | } |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #endif /* CONFIG_FTRACE */ | 22 | #endif /* CONFIG_FUNCTION_TRACER */ |
23 | 23 | ||
24 | #endif /* _ASM_X86_FTRACE_H */ | 24 | #endif /* _ASM_X86_FTRACE_H */ |
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 5618a103f395..ac2abc88cd95 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -82,9 +82,9 @@ extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); | |||
82 | extern void early_ioremap_init(void); | 82 | extern void early_ioremap_init(void); |
83 | extern void early_ioremap_clear(void); | 83 | extern void early_ioremap_clear(void); |
84 | extern void early_ioremap_reset(void); | 84 | extern void early_ioremap_reset(void); |
85 | extern void *early_ioremap(unsigned long offset, unsigned long size); | 85 | extern void __iomem *early_ioremap(unsigned long offset, unsigned long size); |
86 | extern void *early_memremap(unsigned long offset, unsigned long size); | 86 | extern void __iomem *early_memremap(unsigned long offset, unsigned long size); |
87 | extern void early_iounmap(void *addr, unsigned long size); | 87 | extern void early_iounmap(void __iomem *addr, unsigned long size); |
88 | extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); | 88 | extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); |
89 | 89 | ||
90 | 90 | ||
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index 98e28ea8cd16..e4a552d44465 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -7,7 +7,6 @@ extern struct dma_mapping_ops nommu_dma_ops; | |||
7 | extern int force_iommu, no_iommu; | 7 | extern int force_iommu, no_iommu; |
8 | extern int iommu_detected; | 8 | extern int iommu_detected; |
9 | extern int dmar_disabled; | 9 | extern int dmar_disabled; |
10 | extern int forbid_dac; | ||
11 | 10 | ||
12 | extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len); | 11 | extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len); |
13 | 12 | ||
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 65679d006337..8346be87cfa1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -364,6 +364,9 @@ struct kvm_arch{ | |||
364 | 364 | ||
365 | struct page *ept_identity_pagetable; | 365 | struct page *ept_identity_pagetable; |
366 | bool ept_identity_pagetable_done; | 366 | bool ept_identity_pagetable_done; |
367 | |||
368 | unsigned long irq_sources_bitmap; | ||
369 | unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; | ||
367 | }; | 370 | }; |
368 | 371 | ||
369 | struct kvm_vm_stat { | 372 | struct kvm_vm_stat { |
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h
index d5c0b826a4ff..9d80db91e992 100644
--- a/arch/x86/include/asm/mach-default/mach_wakecpu.h
+++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h
@@ -33,10 +33,9 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) | |||
33 | { | 33 | { |
34 | } | 34 | } |
35 | 35 | ||
36 | #if APIC_DEBUG | 36 | #define inquire_remote_apic(apicid) do { \ |
37 | #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid) | 37 | if (apic_verbosity >= APIC_DEBUG) \ |
38 | #else | 38 | __inquire_remote_apic(apicid); \ |
39 | #define inquire_remote_apic(apicid) {} | 39 | } while (0) |
40 | #endif | ||
41 | 40 | ||
42 | #endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */ | 41 | #endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */ |
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index fb16cec702e4..52597aeadfff 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -120,13 +120,13 @@ static inline void pud_clear(pud_t *pudp) | |||
120 | write_cr3(pgd); | 120 | write_cr3(pgd); |
121 | } | 121 | } |
122 | 122 | ||
123 | #define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_PFN_MASK)) | 123 | #define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT) |
124 | 124 | ||
125 | #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK)) | 125 | #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK)) |
126 | 126 | ||
127 | 127 | ||
128 | /* Find an entry in the second-level page table.. */ | 128 | /* Find an entry in the second-level page table.. */ |
129 | #define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) + \ | 129 | #define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \ |
130 | pmd_index(address)) | 130 | pmd_index(address)) |
131 | 131 | ||
132 | #ifdef CONFIG_SMP | 132 | #ifdef CONFIG_SMP |
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 2766021aef80..d12811ce51d9 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -225,5 +225,11 @@ static inline int hard_smp_processor_id(void) | |||
225 | 225 | ||
226 | #endif /* CONFIG_X86_LOCAL_APIC */ | 226 | #endif /* CONFIG_X86_LOCAL_APIC */ |
227 | 227 | ||
228 | #ifdef CONFIG_X86_HAS_BOOT_CPU_ID | ||
229 | extern unsigned char boot_cpu_id; | ||
230 | #else | ||
231 | #define boot_cpu_id 0 | ||
232 | #endif | ||
233 | |||
228 | #endif /* __ASSEMBLY__ */ | 234 | #endif /* __ASSEMBLY__ */ |
229 | #endif /* _ASM_X86_SMP_H */ | 235 | #endif /* _ASM_X86_SMP_H */ |
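
The new block gives every configuration a usable boot_cpu_id: Voyager exports its real variable, everyone else gets a constant 0. Generic code can then compare against boot_cpu_id unconditionally, as the cpu_init() change in cpu/common.c below does; on non-Voyager kernels the comparison folds back to the old !smp_processor_id() test at compile time. A hedged sketch of the call-site pattern (the helper names are made up):

	void sketch_per_cpu_setup(void)
	{
		/* boot_cpu_id is the literal 0 unless X86_HAS_BOOT_CPU_ID */
		if (smp_processor_id() == boot_cpu_id)
			one_time_boot_cpu_setup();	/* hypothetical */
		common_per_cpu_setup();			/* hypothetical */
	}
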
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index c6ad93e315c8..7a5782610b2b 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <linux/numa.h> | 14 | #include <linux/numa.h> |
15 | #include <linux/percpu.h> | 15 | #include <linux/percpu.h> |
16 | #include <linux/timer.h> | ||
16 | #include <asm/types.h> | 17 | #include <asm/types.h> |
17 | #include <asm/percpu.h> | 18 | #include <asm/percpu.h> |
18 | 19 | ||
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index d7e5a58ee22f..e489ff9cb3e2 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -6,11 +6,12 @@ extra-y := head_$(BITS).o head$(BITS).o head.o init_task.o vmlinu | |||
6 | 6 | ||
7 | CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE) | 7 | CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE) |
8 | 8 | ||
9 | ifdef CONFIG_FTRACE | 9 | ifdef CONFIG_FUNCTION_TRACER |
10 | # Do not profile debug and lowlevel utilities | 10 | # Do not profile debug and lowlevel utilities |
11 | CFLAGS_REMOVE_tsc.o = -pg | 11 | CFLAGS_REMOVE_tsc.o = -pg |
12 | CFLAGS_REMOVE_rtc.o = -pg | 12 | CFLAGS_REMOVE_rtc.o = -pg |
13 | CFLAGS_REMOVE_paravirt-spinlocks.o = -pg | 13 | CFLAGS_REMOVE_paravirt-spinlocks.o = -pg |
14 | CFLAGS_REMOVE_ftrace.o = -pg | ||
14 | endif | 15 | endif |
15 | 16 | ||
16 | # | 17 | # |
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 0d9c993aa93e..ef8f831af823 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) | |||
69 | */ | 69 | */ |
70 | void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) | 70 | void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) |
71 | { | 71 | { |
72 | #ifdef CONFIG_SMP | 72 | #ifdef CONFIG_X86_SMP |
73 | unsigned int eax, ebx, ecx, edx, sub_index; | 73 | unsigned int eax, ebx, ecx, edx, sub_index; |
74 | unsigned int ht_mask_width, core_plus_mask_width; | 74 | unsigned int ht_mask_width, core_plus_mask_width; |
75 | unsigned int core_select_mask, core_level_siblings; | 75 | unsigned int core_select_mask, core_level_siblings; |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 25581dcb280e..003a65395bd5 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -549,6 +549,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) | |||
549 | this_cpu->c_early_init(c); | 549 | this_cpu->c_early_init(c); |
550 | 550 | ||
551 | validate_pat_support(c); | 551 | validate_pat_support(c); |
552 | |||
553 | #ifdef CONFIG_SMP | ||
554 | c->cpu_index = boot_cpu_id; | ||
555 | #endif | ||
552 | } | 556 | } |
553 | 557 | ||
554 | void __init early_cpu_init(void) | 558 | void __init early_cpu_init(void) |
@@ -1134,7 +1138,7 @@ void __cpuinit cpu_init(void) | |||
1134 | /* | 1138 | /* |
1135 | * Boot processor to setup the FP and extended state context info. | 1139 | * Boot processor to setup the FP and extended state context info. |
1136 | */ | 1140 | */ |
1137 | if (!smp_processor_id()) | 1141 | if (smp_processor_id() == boot_cpu_id) |
1138 | init_thread_xstate(); | 1142 | init_thread_xstate(); |
1139 | 1143 | ||
1140 | xsave_init(); | 1144 | xsave_init(); |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index dd65143941a8..28b597ef9ca1 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1149,7 +1149,7 @@ ENDPROC(xen_failsafe_callback) | |||
1149 | 1149 | ||
1150 | #endif /* CONFIG_XEN */ | 1150 | #endif /* CONFIG_XEN */ |
1151 | 1151 | ||
1152 | #ifdef CONFIG_FTRACE | 1152 | #ifdef CONFIG_FUNCTION_TRACER |
1153 | #ifdef CONFIG_DYNAMIC_FTRACE | 1153 | #ifdef CONFIG_DYNAMIC_FTRACE |
1154 | 1154 | ||
1155 | ENTRY(mcount) | 1155 | ENTRY(mcount) |
@@ -1204,7 +1204,7 @@ trace: | |||
1204 | jmp ftrace_stub | 1204 | jmp ftrace_stub |
1205 | END(mcount) | 1205 | END(mcount) |
1206 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 1206 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
1207 | #endif /* CONFIG_FTRACE */ | 1207 | #endif /* CONFIG_FUNCTION_TRACER */ |
1208 | 1208 | ||
1209 | .section .rodata,"a" | 1209 | .section .rodata,"a" |
1210 | #include "syscall_table_32.S" | 1210 | #include "syscall_table_32.S" |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 09e7145484c5..b86f332c96a6 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -61,7 +61,7 @@ | |||
61 | 61 | ||
62 | .code64 | 62 | .code64 |
63 | 63 | ||
64 | #ifdef CONFIG_FTRACE | 64 | #ifdef CONFIG_FUNCTION_TRACER |
65 | #ifdef CONFIG_DYNAMIC_FTRACE | 65 | #ifdef CONFIG_DYNAMIC_FTRACE |
66 | ENTRY(mcount) | 66 | ENTRY(mcount) |
67 | retq | 67 | retq |
@@ -138,7 +138,7 @@ trace: | |||
138 | jmp ftrace_stub | 138 | jmp ftrace_stub |
139 | END(mcount) | 139 | END(mcount) |
140 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 140 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
141 | #endif /* CONFIG_FTRACE */ | 141 | #endif /* CONFIG_FUNCTION_TRACER */ |
142 | 142 | ||
143 | #ifndef CONFIG_PREEMPT | 143 | #ifndef CONFIG_PREEMPT |
144 | #define retint_kernel retint_restore_args | 144 | #define retint_kernel retint_restore_args |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index d073d981a730..50ea0ac8c9bf 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -21,8 +21,7 @@ | |||
21 | #include <asm/nops.h> | 21 | #include <asm/nops.h> |
22 | 22 | ||
23 | 23 | ||
24 | /* Long is fine, even if it is only 4 bytes ;-) */ | 24 | static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; |
25 | static unsigned long *ftrace_nop; | ||
26 | 25 | ||
27 | union ftrace_code_union { | 26 | union ftrace_code_union { |
28 | char code[MCOUNT_INSN_SIZE]; | 27 | char code[MCOUNT_INSN_SIZE]; |
@@ -33,17 +32,17 @@ union ftrace_code_union { | |||
33 | }; | 32 | }; |
34 | 33 | ||
35 | 34 | ||
36 | static int notrace ftrace_calc_offset(long ip, long addr) | 35 | static int ftrace_calc_offset(long ip, long addr) |
37 | { | 36 | { |
38 | return (int)(addr - ip); | 37 | return (int)(addr - ip); |
39 | } | 38 | } |
40 | 39 | ||
41 | notrace unsigned char *ftrace_nop_replace(void) | 40 | unsigned char *ftrace_nop_replace(void) |
42 | { | 41 | { |
43 | return (char *)ftrace_nop; | 42 | return ftrace_nop; |
44 | } | 43 | } |
45 | 44 | ||
46 | notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) | 45 | unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) |
47 | { | 46 | { |
48 | static union ftrace_code_union calc; | 47 | static union ftrace_code_union calc; |
49 | 48 | ||
@@ -57,7 +56,7 @@ notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) | |||
57 | return calc.code; | 56 | return calc.code; |
58 | } | 57 | } |
59 | 58 | ||
60 | notrace int | 59 | int |
61 | ftrace_modify_code(unsigned long ip, unsigned char *old_code, | 60 | ftrace_modify_code(unsigned long ip, unsigned char *old_code, |
62 | unsigned char *new_code) | 61 | unsigned char *new_code) |
63 | { | 62 | { |
@@ -66,26 +65,31 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, | |||
66 | /* | 65 | /* |
67 | * Note: Due to modules and __init, code can | 66 | * Note: Due to modules and __init, code can |
68 | * disappear and change, we need to protect against faulting | 67 | * disappear and change, we need to protect against faulting |
69 | * as well as code changing. | 68 | * as well as code changing. We do this by using the |
69 | * probe_kernel_* functions. | ||
70 | * | 70 | * |
71 | * No real locking needed, this code is run through | 71 | * No real locking needed, this code is run through |
72 | * kstop_machine, or before SMP starts. | 72 | * kstop_machine, or before SMP starts. |
73 | */ | 73 | */ |
74 | if (__copy_from_user_inatomic(replaced, (char __user *)ip, MCOUNT_INSN_SIZE)) | ||
75 | return 1; | ||
76 | 74 | ||
75 | /* read the text we want to modify */ | ||
76 | if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) | ||
77 | return -EFAULT; | ||
78 | |||
79 | /* Make sure it is what we expect it to be */ | ||
77 | if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) | 80 | if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) |
78 | return 2; | 81 | return -EINVAL; |
79 | 82 | ||
80 | WARN_ON_ONCE(__copy_to_user_inatomic((char __user *)ip, new_code, | 83 | /* replace the text with the new text */ |
81 | MCOUNT_INSN_SIZE)); | 84 | if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE)) |
85 | return -EPERM; | ||
82 | 86 | ||
83 | sync_core(); | 87 | sync_core(); |
84 | 88 | ||
85 | return 0; | 89 | return 0; |
86 | } | 90 | } |
87 | 91 | ||
88 | notrace int ftrace_update_ftrace_func(ftrace_func_t func) | 92 | int ftrace_update_ftrace_func(ftrace_func_t func) |
89 | { | 93 | { |
90 | unsigned long ip = (unsigned long)(&ftrace_call); | 94 | unsigned long ip = (unsigned long)(&ftrace_call); |
91 | unsigned char old[MCOUNT_INSN_SIZE], *new; | 95 | unsigned char old[MCOUNT_INSN_SIZE], *new; |
@@ -98,13 +102,6 @@ notrace int ftrace_update_ftrace_func(ftrace_func_t func) | |||
98 | return ret; | 102 | return ret; |
99 | } | 103 | } |
100 | 104 | ||
101 | notrace int ftrace_mcount_set(unsigned long *data) | ||
102 | { | ||
103 | /* mcount is initialized as a nop */ | ||
104 | *data = 0; | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | int __init ftrace_dyn_arch_init(void *data) | 105 | int __init ftrace_dyn_arch_init(void *data) |
109 | { | 106 | { |
110 | extern const unsigned char ftrace_test_p6nop[]; | 107 | extern const unsigned char ftrace_test_p6nop[]; |
@@ -127,9 +124,6 @@ int __init ftrace_dyn_arch_init(void *data) | |||
127 | * TODO: check the cpuid to determine the best nop. | 124 | * TODO: check the cpuid to determine the best nop. |
128 | */ | 125 | */ |
129 | asm volatile ( | 126 | asm volatile ( |
130 | "jmp ftrace_test_jmp\n" | ||
131 | /* This code needs to stay around */ | ||
132 | ".section .text, \"ax\"\n" | ||
133 | "ftrace_test_jmp:" | 127 | "ftrace_test_jmp:" |
134 | "jmp ftrace_test_p6nop\n" | 128 | "jmp ftrace_test_p6nop\n" |
135 | "nop\n" | 129 | "nop\n" |
@@ -140,8 +134,6 @@ int __init ftrace_dyn_arch_init(void *data) | |||
140 | "jmp 1f\n" | 134 | "jmp 1f\n" |
141 | "ftrace_test_nop5:" | 135 | "ftrace_test_nop5:" |
142 | ".byte 0x66,0x66,0x66,0x66,0x90\n" | 136 | ".byte 0x66,0x66,0x66,0x66,0x90\n" |
143 | "jmp 1f\n" | ||
144 | ".previous\n" | ||
145 | "1:" | 137 | "1:" |
146 | ".section .fixup, \"ax\"\n" | 138 | ".section .fixup, \"ax\"\n" |
147 | "2: movl $1, %0\n" | 139 | "2: movl $1, %0\n" |
@@ -156,15 +148,15 @@ int __init ftrace_dyn_arch_init(void *data) | |||
156 | switch (faulted) { | 148 | switch (faulted) { |
157 | case 0: | 149 | case 0: |
158 | pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n"); | 150 | pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n"); |
159 | ftrace_nop = (unsigned long *)ftrace_test_p6nop; | 151 | memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); |
160 | break; | 152 | break; |
161 | case 1: | 153 | case 1: |
162 | pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n"); | 154 | pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n"); |
163 | ftrace_nop = (unsigned long *)ftrace_test_nop5; | 155 | memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); |
164 | break; | 156 | break; |
165 | case 2: | 157 | case 2: |
166 | pr_info("ftrace: converting mcount calls to jmp . + 5\n"); | 158 | pr_info("ftrace: converting mcount calls to jmp . + 5\n"); |
167 | ftrace_nop = (unsigned long *)ftrace_test_jmp; | 159 | memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); |
168 | break; | 160 | break; |
169 | } | 161 | } |
170 | 162 | ||
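
The ftrace_modify_code() rework above swaps the __copy_{from,to}_user_inatomic() calls for probe_kernel_read()/probe_kernel_write() and turns the magic 1/2 return values into errnos. The flow is expect-compare-patch; a condensed sketch, assuming the 2.6.28-era probe_kernel_* signatures (a paraphrase, not the kernel's code verbatim):

	static int sketch_patch_text(unsigned long ip, unsigned char *old_code,
				     unsigned char *new_code, size_t len)
	{
		unsigned char cur[MCOUNT_INSN_SIZE];

		if (len > sizeof(cur))
			return -EINVAL;
		if (probe_kernel_read(cur, (void *)ip, len))
			return -EFAULT;	/* text vanished: module unload, __init */
		if (memcmp(cur, old_code, len) != 0)
			return -EINVAL;	/* site no longer holds what we expect */
		if (probe_kernel_write((void *)ip, new_code, len))
			return -EPERM;	/* write to protected or absent text */
		sync_core();		/* serialize after self-modifying code */
		return 0;
	}
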
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 680a06557c5e..2c7dbdb98278 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/ctype.h> | 15 | #include <linux/ctype.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | #include <linux/bootmem.h> | ||
19 | #include <linux/module.h> | 18 | #include <linux/module.h> |
20 | #include <linux/hardirq.h> | 19 | #include <linux/hardirq.h> |
21 | #include <asm/smp.h> | 20 | #include <asm/smp.h> |
@@ -398,16 +397,16 @@ void __init uv_system_init(void) | |||
398 | printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); | 397 | printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); |
399 | 398 | ||
400 | bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); | 399 | bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); |
401 | uv_blade_info = alloc_bootmem_pages(bytes); | 400 | uv_blade_info = kmalloc(bytes, GFP_KERNEL); |
402 | 401 | ||
403 | get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); | 402 | get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); |
404 | 403 | ||
405 | bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes(); | 404 | bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes(); |
406 | uv_node_to_blade = alloc_bootmem_pages(bytes); | 405 | uv_node_to_blade = kmalloc(bytes, GFP_KERNEL); |
407 | memset(uv_node_to_blade, 255, bytes); | 406 | memset(uv_node_to_blade, 255, bytes); |
408 | 407 | ||
409 | bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus(); | 408 | bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus(); |
410 | uv_cpu_to_blade = alloc_bootmem_pages(bytes); | 409 | uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL); |
411 | memset(uv_cpu_to_blade, 255, bytes); | 410 | memset(uv_cpu_to_blade, 255, bytes); |
412 | 411 | ||
413 | blade = 0; | 412 | blade = 0; |
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index dd7ebee446af..43cec6bdda63 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -5,7 +5,7 @@ | |||
5 | #include <asm/desc.h> | 5 | #include <asm/desc.h> |
6 | #include <asm/ftrace.h> | 6 | #include <asm/ftrace.h> |
7 | 7 | ||
8 | #ifdef CONFIG_FTRACE | 8 | #ifdef CONFIG_FUNCTION_TRACER |
9 | /* mcount is defined in assembly */ | 9 | /* mcount is defined in assembly */ |
10 | EXPORT_SYMBOL(mcount); | 10 | EXPORT_SYMBOL(mcount); |
11 | #endif | 11 | #endif |
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
index 304d8bad6559..cbc4332a77b2 100644
--- a/arch/x86/kernel/k8.c
+++ b/arch/x86/kernel/k8.c
@@ -18,7 +18,6 @@ static u32 *flush_words; | |||
18 | struct pci_device_id k8_nb_ids[] = { | 18 | struct pci_device_id k8_nb_ids[] = { |
19 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, | 19 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, |
20 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, | 20 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, |
21 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, | ||
22 | {} | 21 | {} |
23 | }; | 22 | }; |
24 | EXPORT_SYMBOL(k8_nb_ids); | 23 | EXPORT_SYMBOL(k8_nb_ids); |
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 0732adba05ca..7a385746509a 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -162,7 +162,10 @@ void machine_kexec(struct kimage *image) | |||
162 | page_list[VA_PTE_0] = (unsigned long)kexec_pte0; | 162 | page_list[VA_PTE_0] = (unsigned long)kexec_pte0; |
163 | page_list[PA_PTE_1] = __pa(kexec_pte1); | 163 | page_list[PA_PTE_1] = __pa(kexec_pte1); |
164 | page_list[VA_PTE_1] = (unsigned long)kexec_pte1; | 164 | page_list[VA_PTE_1] = (unsigned long)kexec_pte1; |
165 | page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) << PAGE_SHIFT); | 165 | |
166 | if (image->type == KEXEC_TYPE_DEFAULT) | ||
167 | page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) | ||
168 | << PAGE_SHIFT); | ||
166 | 169 | ||
167 | /* The segment registers are funny things, they have both a | 170 | /* The segment registers are funny things, they have both a |
168 | * visible and an invisible part. Whenever the visible part is | 171 | * visible and an invisible part. Whenever the visible part is |
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 7a1f8eeac2c7..5f8e5d75a254 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -39,7 +39,7 @@ | |||
39 | #include <asm/microcode.h> | 39 | #include <asm/microcode.h> |
40 | 40 | ||
41 | MODULE_DESCRIPTION("AMD Microcode Update Driver"); | 41 | MODULE_DESCRIPTION("AMD Microcode Update Driver"); |
42 | MODULE_AUTHOR("Peter Oruba <peter.oruba@amd.com>"); | 42 | MODULE_AUTHOR("Peter Oruba"); |
43 | MODULE_LICENSE("GPL v2"); | 43 | MODULE_LICENSE("GPL v2"); |
44 | 44 | ||
45 | #define UCODE_MAGIC 0x00414d44 | 45 | #define UCODE_MAGIC 0x00414d44 |
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 936d8d55f230..82fb2809ce32 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -480,8 +480,8 @@ static int __init microcode_init(void) | |||
480 | 480 | ||
481 | printk(KERN_INFO | 481 | printk(KERN_INFO |
482 | "Microcode Update Driver: v" MICROCODE_VERSION | 482 | "Microcode Update Driver: v" MICROCODE_VERSION |
483 | " <tigran@aivazian.fsnet.co.uk>" | 483 | " <tigran@aivazian.fsnet.co.uk>," |
484 | " <peter.oruba@amd.com>\n"); | 484 | " Peter Oruba\n"); |
485 | 485 | ||
486 | return 0; | 486 | return 0; |
487 | } | 487 | } |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 1972266e8ba5..192624820217 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -9,6 +9,8 @@ | |||
9 | #include <asm/calgary.h> | 9 | #include <asm/calgary.h> |
10 | #include <asm/amd_iommu.h> | 10 | #include <asm/amd_iommu.h> |
11 | 11 | ||
12 | static int forbid_dac __read_mostly; | ||
13 | |||
12 | struct dma_mapping_ops *dma_ops; | 14 | struct dma_mapping_ops *dma_ops; |
13 | EXPORT_SYMBOL(dma_ops); | 15 | EXPORT_SYMBOL(dma_ops); |
14 | 16 | ||
@@ -291,3 +293,17 @@ void pci_iommu_shutdown(void) | |||
291 | } | 293 | } |
292 | /* Must execute after PCI subsystem */ | 294 | /* Must execute after PCI subsystem */ |
293 | fs_initcall(pci_iommu_init); | 295 | fs_initcall(pci_iommu_init); |
296 | |||
297 | #ifdef CONFIG_PCI | ||
298 | /* Many VIA bridges seem to corrupt data for DAC. Disable it here */ | ||
299 | |||
300 | static __devinit void via_no_dac(struct pci_dev *dev) | ||
301 | { | ||
302 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { | ||
303 | printk(KERN_INFO "PCI: VIA PCI bridge detected." | ||
304 | "Disabling DAC.\n"); | ||
305 | forbid_dac = 1; | ||
306 | } | ||
307 | } | ||
308 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); | ||
309 | #endif | ||
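
forbid_dac becomes a static in pci-dma.c, and the new via_no_dac() fixup flips it for any VIA PCI-to-PCI bridge through DECLARE_PCI_FIXUP_FINAL. The class test works because pci_dev->class packs base class, subclass and programming interface into one 0xBBSSPP word; a tiny sketch of that check (the helper name is made up):

	/* PCI_CLASS_BRIDGE_PCI is 0x0604; shifting out the low prog-if byte
	 * matches any PCI-to-PCI bridge regardless of programming interface. */
	static inline int sketch_is_p2p_bridge(const struct pci_dev *dev)
	{
		return (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI;
	}
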
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index e3f75bbcedea..a42b02b4df68 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -744,7 +744,7 @@ void __init gart_iommu_init(void) | |||
744 | long i; | 744 | long i; |
745 | 745 | ||
746 | if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) { | 746 | if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) { |
747 | printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n"); | 747 | printk(KERN_INFO "PCI-GART: No AMD GART found.\n"); |
748 | return; | 748 | return; |
749 | } | 749 | } |
750 | 750 | ||
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index c4ce0332759e..3c539d111abb 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -18,9 +18,21 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size, | |||
18 | return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction); | 18 | return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction); |
19 | } | 19 | } |
20 | 20 | ||
21 | static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | ||
22 | dma_addr_t *dma_handle, gfp_t flags) | ||
23 | { | ||
24 | void *vaddr; | ||
25 | |||
26 | vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags); | ||
27 | if (vaddr) | ||
28 | return vaddr; | ||
29 | |||
30 | return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); | ||
31 | } | ||
32 | |||
21 | struct dma_mapping_ops swiotlb_dma_ops = { | 33 | struct dma_mapping_ops swiotlb_dma_ops = { |
22 | .mapping_error = swiotlb_dma_mapping_error, | 34 | .mapping_error = swiotlb_dma_mapping_error, |
23 | .alloc_coherent = swiotlb_alloc_coherent, | 35 | .alloc_coherent = x86_swiotlb_alloc_coherent, |
24 | .free_coherent = swiotlb_free_coherent, | 36 | .free_coherent = swiotlb_free_coherent, |
25 | .map_single = swiotlb_map_single_phys, | 37 | .map_single = swiotlb_map_single_phys, |
26 | .unmap_single = swiotlb_unmap_single, | 38 | .unmap_single = swiotlb_unmap_single, |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 161bb850fc47..62348e4fd8d1 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -759,7 +759,7 @@ __cpuinit int unsynchronized_tsc(void) | |||
759 | if (!cpu_has_tsc || tsc_unstable) | 759 | if (!cpu_has_tsc || tsc_unstable) |
760 | return 1; | 760 | return 1; |
761 | 761 | ||
762 | #ifdef CONFIG_SMP | 762 | #ifdef CONFIG_X86_SMP |
763 | if (apic_is_clustered_box()) | 763 | if (apic_is_clustered_box()) |
764 | return 1; | 764 | return 1; |
765 | #endif | 765 | #endif |
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 7766d36983fc..a688f3bfaec2 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -78,7 +78,7 @@ static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf, | |||
78 | 78 | ||
79 | static void __init set_vsmp_pv_ops(void) | 79 | static void __init set_vsmp_pv_ops(void) |
80 | { | 80 | { |
81 | void *address; | 81 | void __iomem *address; |
82 | unsigned int cap, ctl, cfg; | 82 | unsigned int cap, ctl, cfg; |
83 | 83 | ||
84 | /* set vSMP magic bits to indicate vSMP capable kernel */ | 84 | /* set vSMP magic bits to indicate vSMP capable kernel */ |
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index b545f371b5f5..695e426aa354 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -12,7 +12,7 @@ | |||
12 | #include <asm/desc.h> | 12 | #include <asm/desc.h> |
13 | #include <asm/ftrace.h> | 13 | #include <asm/ftrace.h> |
14 | 14 | ||
15 | #ifdef CONFIG_FTRACE | 15 | #ifdef CONFIG_FUNCTION_TRACER |
16 | /* mcount is defined in assembly */ | 16 | /* mcount is defined in assembly */ |
17 | EXPORT_SYMBOL(mcount); | 17 | EXPORT_SYMBOL(mcount); |
18 | #endif | 18 | #endif |
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 11c6725fb798..8772dc946823 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -545,6 +545,12 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm) | |||
545 | if (!pit) | 545 | if (!pit) |
546 | return NULL; | 546 | return NULL; |
547 | 547 | ||
548 | mutex_lock(&kvm->lock); | ||
549 | pit->irq_source_id = kvm_request_irq_source_id(kvm); | ||
550 | mutex_unlock(&kvm->lock); | ||
551 | if (pit->irq_source_id < 0) | ||
552 | return NULL; | ||
553 | |||
548 | mutex_init(&pit->pit_state.lock); | 554 | mutex_init(&pit->pit_state.lock); |
549 | mutex_lock(&pit->pit_state.lock); | 555 | mutex_lock(&pit->pit_state.lock); |
550 | spin_lock_init(&pit->pit_state.inject_lock); | 556 | spin_lock_init(&pit->pit_state.inject_lock); |
@@ -587,6 +593,7 @@ void kvm_free_pit(struct kvm *kvm) | |||
587 | mutex_lock(&kvm->arch.vpit->pit_state.lock); | 593 | mutex_lock(&kvm->arch.vpit->pit_state.lock); |
588 | timer = &kvm->arch.vpit->pit_state.pit_timer.timer; | 594 | timer = &kvm->arch.vpit->pit_state.pit_timer.timer; |
589 | hrtimer_cancel(timer); | 595 | hrtimer_cancel(timer); |
596 | kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id); | ||
590 | mutex_unlock(&kvm->arch.vpit->pit_state.lock); | 597 | mutex_unlock(&kvm->arch.vpit->pit_state.lock); |
591 | kfree(kvm->arch.vpit); | 598 | kfree(kvm->arch.vpit); |
592 | } | 599 | } |
@@ -595,8 +602,8 @@ void kvm_free_pit(struct kvm *kvm) | |||
595 | static void __inject_pit_timer_intr(struct kvm *kvm) | 602 | static void __inject_pit_timer_intr(struct kvm *kvm) |
596 | { | 603 | { |
597 | mutex_lock(&kvm->lock); | 604 | mutex_lock(&kvm->lock); |
598 | kvm_set_irq(kvm, 0, 1); | 605 | kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1); |
599 | kvm_set_irq(kvm, 0, 0); | 606 | kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0); |
600 | mutex_unlock(&kvm->lock); | 607 | mutex_unlock(&kvm->lock); |
601 | } | 608 | } |
602 | 609 | ||
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index e436d4983aa1..4178022b97aa 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -44,6 +44,7 @@ struct kvm_pit { | |||
44 | struct kvm_io_device speaker_dev; | 44 | struct kvm_io_device speaker_dev; |
45 | struct kvm *kvm; | 45 | struct kvm *kvm; |
46 | struct kvm_kpit_state pit_state; | 46 | struct kvm_kpit_state pit_state; |
47 | int irq_source_id; | ||
47 | }; | 48 | }; |
48 | 49 | ||
49 | #define KVM_PIT_BASE_ADDRESS 0x40 | 50 | #define KVM_PIT_BASE_ADDRESS 0x40 |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 99c239c5c0ac..2a5e64881d9b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2634,6 +2634,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu, | |||
2634 | static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu) | 2634 | static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu) |
2635 | { | 2635 | { |
2636 | kvm_x86_ops->tlb_flush(vcpu); | 2636 | kvm_x86_ops->tlb_flush(vcpu); |
2637 | set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests); | ||
2637 | return 1; | 2638 | return 1; |
2638 | } | 2639 | } |
2639 | 2640 | ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4f0677d1eae8..f1f8ff2f1fa2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1742,7 +1742,8 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
1742 | goto out; | 1742 | goto out; |
1743 | if (irqchip_in_kernel(kvm)) { | 1743 | if (irqchip_in_kernel(kvm)) { |
1744 | mutex_lock(&kvm->lock); | 1744 | mutex_lock(&kvm->lock); |
1745 | kvm_set_irq(kvm, irq_event.irq, irq_event.level); | 1745 | kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, |
1746 | irq_event.irq, irq_event.level); | ||
1746 | mutex_unlock(&kvm->lock); | 1747 | mutex_unlock(&kvm->lock); |
1747 | r = 0; | 1748 | r = 0; |
1748 | } | 1749 | } |
@@ -4013,6 +4014,9 @@ struct kvm *kvm_arch_create_vm(void) | |||
4013 | INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); | 4014 | INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); |
4014 | INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); | 4015 | INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); |
4015 | 4016 | ||
4017 | /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ | ||
4018 | set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); | ||
4019 | |||
4016 | return kvm; | 4020 | return kvm; |
4017 | } | 4021 | } |
4018 | 4022 | ||
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 48ee4f9435f4..a5d8e1ace1cf 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -367,10 +367,9 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx, | |||
367 | * lazily after a task switch, and Linux uses that gratefully, but wouldn't a | 367 | * lazily after a task switch, and Linux uses that gratefully, but wouldn't a |
368 | * name like "FPUTRAP bit" be a little less cryptic? | 368 | * name like "FPUTRAP bit" be a little less cryptic? |
369 | * | 369 | * |
370 | * We store cr0 (and cr3) locally, because the Host never changes it. The | 370 | * We store cr0 locally because the Host never changes it. The Guest sometimes |
371 | * Guest sometimes wants to read it and we'd prefer not to bother the Host | 371 | * wants to read it and we'd prefer not to bother the Host unnecessarily. */ |
372 | * unnecessarily. */ | 372 | static unsigned long current_cr0; |
373 | static unsigned long current_cr0, current_cr3; | ||
374 | static void lguest_write_cr0(unsigned long val) | 373 | static void lguest_write_cr0(unsigned long val) |
375 | { | 374 | { |
376 | lazy_hcall(LHCALL_TS, val & X86_CR0_TS, 0, 0); | 375 | lazy_hcall(LHCALL_TS, val & X86_CR0_TS, 0, 0); |
@@ -399,17 +398,23 @@ static unsigned long lguest_read_cr2(void) | |||
399 | return lguest_data.cr2; | 398 | return lguest_data.cr2; |
400 | } | 399 | } |
401 | 400 | ||
401 | /* See lguest_set_pte() below. */ | ||
402 | static bool cr3_changed = false; | ||
403 | |||
402 | /* cr3 is the current toplevel pagetable page: the principle is the same as | 404 | /* cr3 is the current toplevel pagetable page: the principle is the same as |
403 | * cr0. Keep a local copy, and tell the Host when it changes. */ | 405 | * cr0. Keep a local copy, and tell the Host when it changes. The only |
406 | * difference is that our local copy is in lguest_data because the Host needs | ||
407 | * to set it upon our initial hypercall. */ | ||
404 | static void lguest_write_cr3(unsigned long cr3) | 408 | static void lguest_write_cr3(unsigned long cr3) |
405 | { | 409 | { |
410 | lguest_data.pgdir = cr3; | ||
406 | lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0); | 411 | lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0); |
407 | current_cr3 = cr3; | 412 | cr3_changed = true; |
408 | } | 413 | } |
409 | 414 | ||
410 | static unsigned long lguest_read_cr3(void) | 415 | static unsigned long lguest_read_cr3(void) |
411 | { | 416 | { |
412 | return current_cr3; | 417 | return lguest_data.pgdir; |
413 | } | 418 | } |
414 | 419 | ||
415 | /* cr4 is used to enable and disable PGE, but we don't care. */ | 420 | /* cr4 is used to enable and disable PGE, but we don't care. */ |
@@ -498,13 +503,13 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) | |||
498 | * to forget all of them. Fortunately, this is very rare. | 503 | * to forget all of them. Fortunately, this is very rare. |
499 | * | 504 | * |
500 | * ... except in early boot when the kernel sets up the initial pagetables, | 505 | * ... except in early boot when the kernel sets up the initial pagetables, |
501 | * which makes booting astonishingly slow. So we don't even tell the Host | 506 | * which makes booting astonishingly slow: 1.83 seconds! So we don't even tell |
502 | * anything changed until we've done the first page table switch. */ | 507 | * the Host anything changed until we've done the first page table switch, |
508 | * which brings boot back to 0.25 seconds. */ | ||
503 | static void lguest_set_pte(pte_t *ptep, pte_t pteval) | 509 | static void lguest_set_pte(pte_t *ptep, pte_t pteval) |
504 | { | 510 | { |
505 | *ptep = pteval; | 511 | *ptep = pteval; |
506 | /* Don't bother with hypercall before initial setup. */ | 512 | if (cr3_changed) |
507 | if (current_cr3) | ||
508 | lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0); | 513 | lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0); |
509 | } | 514 | } |
510 | 515 | ||
@@ -521,7 +526,7 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval) | |||
521 | static void lguest_flush_tlb_single(unsigned long addr) | 526 | static void lguest_flush_tlb_single(unsigned long addr) |
522 | { | 527 | { |
523 | /* Simply set it to zero: if it was not, it will fault back in. */ | 528 | /* Simply set it to zero: if it was not, it will fault back in. */ |
524 | lazy_hcall(LHCALL_SET_PTE, current_cr3, addr, 0); | 529 | lazy_hcall(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0); |
525 | } | 530 | } |
526 | 531 | ||
527 | /* This is what happens after the Guest has removed a large number of entries. | 532 | /* This is what happens after the Guest has removed a large number of entries. |
@@ -581,6 +586,9 @@ static void __init lguest_init_IRQ(void) | |||
581 | 586 | ||
582 | for (i = 0; i < LGUEST_IRQS; i++) { | 587 | for (i = 0; i < LGUEST_IRQS; i++) { |
583 | int vector = FIRST_EXTERNAL_VECTOR + i; | 588 | int vector = FIRST_EXTERNAL_VECTOR + i; |
589 | /* Some systems map "vectors" to interrupts weirdly. Lguest has | ||
590 | * a straightforward 1 to 1 mapping, so force that here. */ | ||
591 | __get_cpu_var(vector_irq)[vector] = i; | ||
584 | if (vector != SYSCALL_VECTOR) { | 592 | if (vector != SYSCALL_VECTOR) { |
585 | set_intr_gate(vector, interrupt[vector]); | 593 | set_intr_gate(vector, interrupt[vector]); |
586 | set_irq_chip_and_handler_name(i, &lguest_irq_controller, | 594 | set_irq_chip_and_handler_name(i, &lguest_irq_controller, |
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 0f6e8a6523ae..7f4c6af14351 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -90,6 +90,7 @@ static void ack_vic_irq(unsigned int irq); | |||
90 | static void vic_enable_cpi(void); | 90 | static void vic_enable_cpi(void); |
91 | static void do_boot_cpu(__u8 cpuid); | 91 | static void do_boot_cpu(__u8 cpuid); |
92 | static void do_quad_bootstrap(void); | 92 | static void do_quad_bootstrap(void); |
93 | static void initialize_secondary(void); | ||
93 | 94 | ||
94 | int hard_smp_processor_id(void); | 95 | int hard_smp_processor_id(void); |
95 | int safe_smp_processor_id(void); | 96 | int safe_smp_processor_id(void); |
@@ -344,6 +345,12 @@ static void do_quad_bootstrap(void) | |||
344 | } | 345 | } |
345 | } | 346 | } |
346 | 347 | ||
348 | void prefill_possible_map(void) | ||
349 | { | ||
350 | /* This is empty on voyager because we need a much | ||
351 | * earlier detection which is done in find_smp_config */ | ||
352 | } | ||
353 | |||
347 | /* Set up all the basic stuff: read the SMP config and make all the | 354 | /* Set up all the basic stuff: read the SMP config and make all the |
348 | * SMP information reflect only the boot cpu. All others will be | 355 | * SMP information reflect only the boot cpu. All others will be |
349 | * brought on-line later. */ | 356 | * brought on-line later. */ |
@@ -413,6 +420,7 @@ void __init smp_store_cpu_info(int id) | |||
413 | struct cpuinfo_x86 *c = &cpu_data(id); | 420 | struct cpuinfo_x86 *c = &cpu_data(id); |
414 | 421 | ||
415 | *c = boot_cpu_data; | 422 | *c = boot_cpu_data; |
423 | c->cpu_index = id; | ||
416 | 424 | ||
417 | identify_secondary_cpu(c); | 425 | identify_secondary_cpu(c); |
418 | } | 426 | } |
@@ -650,6 +658,8 @@ void __init smp_boot_cpus(void) | |||
650 | smp_tune_scheduling(); | 658 | smp_tune_scheduling(); |
651 | */ | 659 | */ |
652 | smp_store_cpu_info(boot_cpu_id); | 660 | smp_store_cpu_info(boot_cpu_id); |
661 | /* setup the jump vector */ | ||
662 | initial_code = (unsigned long)initialize_secondary; | ||
653 | printk("CPU%d: ", boot_cpu_id); | 663 | printk("CPU%d: ", boot_cpu_id); |
654 | print_cpu_info(&cpu_data(boot_cpu_id)); | 664 | print_cpu_info(&cpu_data(boot_cpu_id)); |
655 | 665 | ||
@@ -702,7 +712,7 @@ void __init smp_boot_cpus(void) | |||
702 | 712 | ||
703 | /* Reload the secondary CPUs task structure (this function does not | 713 | /* Reload the secondary CPUs task structure (this function does not |
704 | * return ) */ | 714 | * return ) */ |
705 | void __init initialize_secondary(void) | 715 | static void __init initialize_secondary(void) |
706 | { | 716 | { |
707 | #if 0 | 717 | #if 0 |
708 | // AC kernels only | 718 | // AC kernels only |
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 4ba373c5b8c8..be54176e9eb2 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -233,7 +233,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, | |||
233 | len = (unsigned long) nr_pages << PAGE_SHIFT; | 233 | len = (unsigned long) nr_pages << PAGE_SHIFT; |
234 | end = start + len; | 234 | end = start + len; |
235 | if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, | 235 | if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, |
236 | start, len))) | 236 | (void __user *)start, len))) |
237 | goto slow_irqon; | 237 | goto slow_irqon; |
238 | 238 | ||
239 | /* | 239 | /* |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b8e461d49412..9db01db6e3cd 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -350,8 +350,10 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end, | |||
350 | * pagetable pages as RO. So assume someone who pre-setup | 350 | * pagetable pages as RO. So assume someone who pre-setup |
351 | * these mappings are more intelligent. | 351 | * these mappings are more intelligent. |
352 | */ | 352 | */ |
353 | if (pte_val(*pte)) | 353 | if (pte_val(*pte)) { |
354 | pages++; | ||
354 | continue; | 355 | continue; |
356 | } | ||
355 | 357 | ||
356 | if (0) | 358 | if (0) |
357 | printk(" pte=%p addr=%lx pte=%016lx\n", | 359 | printk(" pte=%p addr=%lx pte=%016lx\n", |
@@ -418,8 +420,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, | |||
418 | * not differ with respect to page frame and | 420 | * not differ with respect to page frame and |
419 | * attributes. | 421 | * attributes. |
420 | */ | 422 | */ |
421 | if (page_size_mask & (1 << PG_LEVEL_2M)) | 423 | if (page_size_mask & (1 << PG_LEVEL_2M)) { |
424 | pages++; | ||
422 | continue; | 425 | continue; |
426 | } | ||
423 | new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); | 427 | new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); |
424 | } | 428 | } |
425 | 429 | ||
@@ -499,8 +503,10 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, | |||
499 | * not differ with respect to page frame and | 503 | * not differ with respect to page frame and |
500 | * attributes. | 504 | * attributes. |
501 | */ | 505 | */ |
502 | if (page_size_mask & (1 << PG_LEVEL_1G)) | 506 | if (page_size_mask & (1 << PG_LEVEL_1G)) { |
507 | pages++; | ||
503 | continue; | 508 | continue; |
509 | } | ||
504 | prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud)); | 510 | prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud)); |
505 | } | 511 | } |
506 | 512 | ||
@@ -665,12 +671,13 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, | |||
665 | unsigned long last_map_addr = 0; | 671 | unsigned long last_map_addr = 0; |
666 | unsigned long page_size_mask = 0; | 672 | unsigned long page_size_mask = 0; |
667 | unsigned long start_pfn, end_pfn; | 673 | unsigned long start_pfn, end_pfn; |
674 | unsigned long pos; | ||
668 | 675 | ||
669 | struct map_range mr[NR_RANGE_MR]; | 676 | struct map_range mr[NR_RANGE_MR]; |
670 | int nr_range, i; | 677 | int nr_range, i; |
671 | int use_pse, use_gbpages; | 678 | int use_pse, use_gbpages; |
672 | 679 | ||
673 | printk(KERN_INFO "init_memory_mapping\n"); | 680 | printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end); |
674 | 681 | ||
675 | /* | 682 | /* |
676 | * Find space for the kernel direct mapping tables. | 683 | * Find space for the kernel direct mapping tables. |
@@ -704,35 +711,50 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, | |||
704 | 711 | ||
705 | /* head if not big page alignment ?*/ | 712 | /* head if not big page alignment ?*/ |
706 | start_pfn = start >> PAGE_SHIFT; | 713 | start_pfn = start >> PAGE_SHIFT; |
707 | end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT) | 714 | pos = start_pfn << PAGE_SHIFT; |
715 | end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT) | ||
708 | << (PMD_SHIFT - PAGE_SHIFT); | 716 | << (PMD_SHIFT - PAGE_SHIFT); |
709 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); | 717 | if (start_pfn < end_pfn) { |
718 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); | ||
719 | pos = end_pfn << PAGE_SHIFT; | ||
720 | } | ||
710 | 721 | ||
711 | /* big page (2M) range*/ | 722 | /* big page (2M) range*/ |
712 | start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT) | 723 | start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT) |
713 | << (PMD_SHIFT - PAGE_SHIFT); | 724 | << (PMD_SHIFT - PAGE_SHIFT); |
714 | end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT) | 725 | end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT) |
715 | << (PUD_SHIFT - PAGE_SHIFT); | 726 | << (PUD_SHIFT - PAGE_SHIFT); |
716 | if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT))) | 727 | if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT))) |
717 | end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)); | 728 | end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)); |
718 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, | 729 | if (start_pfn < end_pfn) { |
719 | page_size_mask & (1<<PG_LEVEL_2M)); | 730 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, |
731 | page_size_mask & (1<<PG_LEVEL_2M)); | ||
732 | pos = end_pfn << PAGE_SHIFT; | ||
733 | } | ||
720 | 734 | ||
721 | /* big page (1G) range */ | 735 | /* big page (1G) range */ |
722 | start_pfn = end_pfn; | 736 | start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT) |
723 | end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT); | 737 | << (PUD_SHIFT - PAGE_SHIFT); |
724 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, | 738 | end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT); |
739 | if (start_pfn < end_pfn) { | ||
740 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, | ||
725 | page_size_mask & | 741 | page_size_mask & |
726 | ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G))); | 742 | ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G))); |
743 | pos = end_pfn << PAGE_SHIFT; | ||
744 | } | ||
727 | 745 | ||
728 | /* tail is not big page (1G) alignment */ | 746 | /* tail is not big page (1G) alignment */ |
729 | start_pfn = end_pfn; | 747 | start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT) |
730 | end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT); | 748 | << (PMD_SHIFT - PAGE_SHIFT); |
731 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, | 749 | end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT); |
732 | page_size_mask & (1<<PG_LEVEL_2M)); | 750 | if (start_pfn < end_pfn) { |
751 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, | ||
752 | page_size_mask & (1<<PG_LEVEL_2M)); | ||
753 | pos = end_pfn << PAGE_SHIFT; | ||
754 | } | ||
733 | 755 | ||
734 | /* tail is not big page (2M) alignment */ | 756 | /* tail is not big page (2M) alignment */ |
735 | start_pfn = end_pfn; | 757 | start_pfn = pos>>PAGE_SHIFT; |
736 | end_pfn = end>>PAGE_SHIFT; | 758 | end_pfn = end>>PAGE_SHIFT; |
737 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); | 759 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); |
738 | 760 | ||
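
The reworked splitting above keeps a running pos and only records a range when start_pfn < end_pfn, which prevents the overlapping or inverted ranges the old code could emit for small or oddly aligned requests. A worked example, assuming 4 KB pages, 2 MB PMDs, 1 GB PUDs and both large page sizes enabled, for start = 0x1f000 and end = 0x81000000:

	head, 4 KB pages:   0x0001f000 - 0x00200000
	2 MB pages:         0x00200000 - 0x40000000
	1 GB pages:         0x40000000 - 0x80000000
	tail, 2 MB pages:   0x80000000 - 0x81000000
	tail, 4 KB pages:   empty (pos already equals end)

Each step advances pos to the end of the range it just recorded, so the next rounding starts from there rather than from the original start.
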
@@ -831,12 +853,12 @@ int arch_add_memory(int nid, u64 start, u64 size) | |||
831 | unsigned long nr_pages = size >> PAGE_SHIFT; | 853 | unsigned long nr_pages = size >> PAGE_SHIFT; |
832 | int ret; | 854 | int ret; |
833 | 855 | ||
834 | last_mapped_pfn = init_memory_mapping(start, start + size-1); | 856 | last_mapped_pfn = init_memory_mapping(start, start + size); |
835 | if (last_mapped_pfn > max_pfn_mapped) | 857 | if (last_mapped_pfn > max_pfn_mapped) |
836 | max_pfn_mapped = last_mapped_pfn; | 858 | max_pfn_mapped = last_mapped_pfn; |
837 | 859 | ||
838 | ret = __add_pages(zone, start_pfn, nr_pages); | 860 | ret = __add_pages(zone, start_pfn, nr_pages); |
839 | WARN_ON(1); | 861 | WARN_ON_ONCE(ret); |
840 | 862 | ||
841 | return ret; | 863 | return ret; |
842 | } | 864 | } |
@@ -878,6 +900,7 @@ static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, | |||
878 | void __init mem_init(void) | 900 | void __init mem_init(void) |
879 | { | 901 | { |
880 | long codesize, reservedpages, datasize, initsize; | 902 | long codesize, reservedpages, datasize, initsize; |
903 | unsigned long absent_pages; | ||
881 | 904 | ||
882 | start_periodic_check_for_corruption(); | 905 | start_periodic_check_for_corruption(); |
883 | 906 | ||
@@ -893,8 +916,9 @@ void __init mem_init(void) | |||
893 | #else | 916 | #else |
894 | totalram_pages = free_all_bootmem(); | 917 | totalram_pages = free_all_bootmem(); |
895 | #endif | 918 | #endif |
896 | reservedpages = max_pfn - totalram_pages - | 919 | |
897 | absent_pages_in_range(0, max_pfn); | 920 | absent_pages = absent_pages_in_range(0, max_pfn); |
921 | reservedpages = max_pfn - totalram_pages - absent_pages; | ||
898 | after_bootmem = 1; | 922 | after_bootmem = 1; |
899 | 923 | ||
900 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | 924 | codesize = (unsigned long) &_etext - (unsigned long) &_text; |
@@ -911,10 +935,11 @@ void __init mem_init(void) | |||
911 | VSYSCALL_END - VSYSCALL_START); | 935 | VSYSCALL_END - VSYSCALL_START); |
912 | 936 | ||
913 | printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " | 937 | printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " |
914 | "%ldk reserved, %ldk data, %ldk init)\n", | 938 | "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n", |
915 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | 939 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), |
916 | max_pfn << (PAGE_SHIFT-10), | 940 | max_pfn << (PAGE_SHIFT-10), |
917 | codesize >> 10, | 941 | codesize >> 10, |
942 | absent_pages << (PAGE_SHIFT-10), | ||
918 | reservedpages << (PAGE_SHIFT-10), | 943 | reservedpages << (PAGE_SHIFT-10), |
919 | datasize >> 10, | 944 | datasize >> 10, |
920 | initsize >> 10); | 945 | initsize >> 10); |
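mem_init() now calls absent_pages_in_range() once, reuses the result for the reserved-pages calculation, and prints the absent figure in the boot banner; the << (PAGE_SHIFT - 10) shifts turn page counts into kilobytes. A standalone sketch of that bookkeeping with invented counts (the real banner also reports code/data/init sizes):

#include <stdio.h>

#define PAGE_SHIFT 12                         /* 4K pages */

int main(void)
{
	/* invented example counts, all in pages */
	unsigned long max_pfn        = 262144;    /* 1G of address space */
	unsigned long totalram_pages = 245000;    /* handed to the page allocator */
	unsigned long absent_pages   = 4096;      /* holes with no RAM behind them */
	unsigned long reservedpages  = max_pfn - totalram_pages - absent_pages;

	printf("Memory: %luk/%luk available (%luk absent, %luk reserved)\n",
	       totalram_pages << (PAGE_SHIFT - 10),
	       max_pfn        << (PAGE_SHIFT - 10),
	       absent_pages   << (PAGE_SHIFT - 10),
	       reservedpages  << (PAGE_SHIFT - 10));
	return 0;
}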
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index ae71e11eb3e5..d4c4307ff3e0 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -387,7 +387,7 @@ static void __iomem *ioremap_default(resource_size_t phys_addr, | |||
387 | unsigned long size) | 387 | unsigned long size) |
388 | { | 388 | { |
389 | unsigned long flags; | 389 | unsigned long flags; |
390 | void *ret; | 390 | void __iomem *ret; |
391 | int err; | 391 | int err; |
392 | 392 | ||
393 | /* | 393 | /* |
@@ -399,11 +399,11 @@ static void __iomem *ioremap_default(resource_size_t phys_addr, | |||
399 | if (err < 0) | 399 | if (err < 0) |
400 | return NULL; | 400 | return NULL; |
401 | 401 | ||
402 | ret = (void *) __ioremap_caller(phys_addr, size, flags, | 402 | ret = __ioremap_caller(phys_addr, size, flags, |
403 | __builtin_return_address(0)); | 403 | __builtin_return_address(0)); |
404 | 404 | ||
405 | free_memtype(phys_addr, phys_addr + size); | 405 | free_memtype(phys_addr, phys_addr + size); |
406 | return (void __iomem *)ret; | 406 | return ret; |
407 | } | 407 | } |
408 | 408 | ||
409 | void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, | 409 | void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, |
@@ -622,7 +622,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx) | |||
622 | __early_set_fixmap(idx, 0, __pgprot(0)); | 622 | __early_set_fixmap(idx, 0, __pgprot(0)); |
623 | } | 623 | } |
624 | 624 | ||
625 | static void *prev_map[FIX_BTMAPS_SLOTS] __initdata; | 625 | static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata; |
626 | static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata; | 626 | static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata; |
627 | static int __init check_early_ioremap_leak(void) | 627 | static int __init check_early_ioremap_leak(void) |
628 | { | 628 | { |
@@ -645,7 +645,7 @@ static int __init check_early_ioremap_leak(void) | |||
645 | } | 645 | } |
646 | late_initcall(check_early_ioremap_leak); | 646 | late_initcall(check_early_ioremap_leak); |
647 | 647 | ||
648 | static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot) | 648 | static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot) |
649 | { | 649 | { |
650 | unsigned long offset, last_addr; | 650 | unsigned long offset, last_addr; |
651 | unsigned int nrpages; | 651 | unsigned int nrpages; |
@@ -713,23 +713,23 @@ static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size, | |||
713 | if (early_ioremap_debug) | 713 | if (early_ioremap_debug) |
714 | printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0)); | 714 | printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0)); |
715 | 715 | ||
716 | prev_map[slot] = (void *) (offset + fix_to_virt(idx0)); | 716 | prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0)); |
717 | return prev_map[slot]; | 717 | return prev_map[slot]; |
718 | } | 718 | } |
719 | 719 | ||
720 | /* Remap an IO device */ | 720 | /* Remap an IO device */ |
721 | void __init *early_ioremap(unsigned long phys_addr, unsigned long size) | 721 | void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size) |
722 | { | 722 | { |
723 | return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO); | 723 | return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO); |
724 | } | 724 | } |
725 | 725 | ||
726 | /* Remap memory */ | 726 | /* Remap memory */ |
727 | void __init *early_memremap(unsigned long phys_addr, unsigned long size) | 727 | void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size) |
728 | { | 728 | { |
729 | return __early_ioremap(phys_addr, size, PAGE_KERNEL); | 729 | return __early_ioremap(phys_addr, size, PAGE_KERNEL); |
730 | } | 730 | } |
731 | 731 | ||
732 | void __init early_iounmap(void *addr, unsigned long size) | 732 | void __init early_iounmap(void __iomem *addr, unsigned long size) |
733 | { | 733 | { |
734 | unsigned long virt_addr; | 734 | unsigned long virt_addr; |
735 | unsigned long offset; | 735 | unsigned long offset; |
@@ -779,7 +779,7 @@ void __init early_iounmap(void *addr, unsigned long size) | |||
779 | --idx; | 779 | --idx; |
780 | --nrpages; | 780 | --nrpages; |
781 | } | 781 | } |
782 | prev_map[slot] = 0; | 782 | prev_map[slot] = NULL; |
783 | } | 783 | } |
784 | 784 | ||
785 | void __this_fixmap_does_not_exist(void) | 785 | void __this_fixmap_does_not_exist(void) |
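The ioremap.c hunks are sparse-annotation fixes: __iomem marks pointers into MMIO space, and the old code kept laundering them through plain void *, which hides exactly the mismatches the checker is meant to catch. A self-contained sketch of how the annotation behaves, assuming the kernel's usual __CHECKER__ definitions:

#include <stdio.h>

/* The kernel's trick: a real sparse attribute under __CHECKER__, a no-op otherwise. */
#ifdef __CHECKER__
# define __iomem __attribute__((noderef, address_space(2)))
# define __force __attribute__((force))
#else
# define __iomem
# define __force
#endif

/* Fake register window standing in for what ioremap() would return. */
static unsigned int fake_mmio_regs[4];

static void __iomem *fake_ioremap(void)
{
	/* __force marks the one deliberate crossing into the __iomem address space */
	return (__force void __iomem *)fake_mmio_regs;
}

int main(void)
{
	void __iomem *regs = fake_ioremap();

	/* sparse reports "cast removes address space" here; plain gcc stays silent */
	unsigned int *leaked = (unsigned int *)regs;

	printf("leaked alias at %p\n", (void *)leaked);
	return 0;
}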
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 738fd0f24958..eb1bf000d12e 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -481,12 +481,16 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) | |||
481 | return 1; | 481 | return 1; |
482 | } | 482 | } |
483 | #else | 483 | #else |
484 | /* This check is needed to avoid cache aliasing when PAT is enabled */ | ||
484 | static inline int range_is_allowed(unsigned long pfn, unsigned long size) | 485 | static inline int range_is_allowed(unsigned long pfn, unsigned long size) |
485 | { | 486 | { |
486 | u64 from = ((u64)pfn) << PAGE_SHIFT; | 487 | u64 from = ((u64)pfn) << PAGE_SHIFT; |
487 | u64 to = from + size; | 488 | u64 to = from + size; |
488 | u64 cursor = from; | 489 | u64 cursor = from; |
489 | 490 | ||
491 | if (!pat_enabled) | ||
492 | return 1; | ||
493 | |||
490 | while (cursor < to) { | 494 | while (cursor < to) { |
491 | if (!devmem_is_allowed(pfn)) { | 495 | if (!devmem_is_allowed(pfn)) { |
492 | printk(KERN_INFO | 496 | printk(KERN_INFO |
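The new !pat_enabled early return reflects that the per-page walk only guards against cache aliasing, which cannot happen unless PAT is active. A userspace sketch of the same walk; devmem_is_allowed() below is a stand-in policy stub, not the kernel's implementation:

#include <stdio.h>

#define PAGE_SHIFT 12

static int pat_enabled = 1;                /* stand-in for the kernel flag */

/* Stand-in policy stub: pretend pfns below 0x100 are always mappable. */
static int devmem_is_allowed(unsigned long pfn)
{
	return pfn < 0x100;
}

static int range_is_allowed(unsigned long pfn, unsigned long size)
{
	unsigned long long from = (unsigned long long)pfn << PAGE_SHIFT;
	unsigned long long to = from + size;
	unsigned long long cursor = from;

	if (!pat_enabled)                  /* no PAT, no aliasing hazard: allow */
		return 1;

	while (cursor < to) {              /* otherwise check every page in the range */
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += 1ULL << PAGE_SHIFT;
		pfn++;
	}
	return 1;
}

int main(void)
{
	printf("pfn 0x80,  64K: %s\n", range_is_allowed(0x80, 0x10000) ? "ok" : "denied");
	printf("pfn 0xf8, 256K: %s\n", range_is_allowed(0xf8, 0x40000) ? "ok" : "denied");
	return 0;
}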
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 313947940a1a..6dcefba7836f 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | ifdef CONFIG_FTRACE | 1 | ifdef CONFIG_FUNCTION_TRACER |
2 | # Do not profile debug and lowlevel utilities | 2 | # Do not profile debug and lowlevel utilities |
3 | CFLAGS_REMOVE_spinlock.o = -pg | 3 | CFLAGS_REMOVE_spinlock.o = -pg |
4 | CFLAGS_REMOVE_time.o = -pg | 4 | CFLAGS_REMOVE_time.o = -pg |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index d4d52f5a1cf7..aba77b2b7d18 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -246,11 +246,21 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr) | |||
246 | { | 246 | { |
247 | unsigned long address = (unsigned long)vaddr; | 247 | unsigned long address = (unsigned long)vaddr; |
248 | unsigned int level; | 248 | unsigned int level; |
249 | pte_t *pte = lookup_address(address, &level); | 249 | pte_t *pte; |
250 | unsigned offset = address & ~PAGE_MASK; | 250 | unsigned offset; |
251 | 251 | ||
252 | BUG_ON(pte == NULL); | 252 | /* |
253 | * if the PFN is in the linear mapped vaddr range, we can just use | ||
254 | * the (quick) virt_to_machine() p2m lookup | ||
255 | */ | ||
256 | if (virt_addr_valid(vaddr)) | ||
257 | return virt_to_machine(vaddr); | ||
253 | 258 | ||
259 | /* otherwise we have to do a (slower) full page-table walk */ | ||
260 | |||
261 | pte = lookup_address(address, &level); | ||
262 | BUG_ON(pte == NULL); | ||
263 | offset = address & ~PAGE_MASK; | ||
254 | return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset); | 264 | return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset); |
255 | } | 265 | } |
256 | 266 | ||
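arbitrary_virt_to_machine() now takes the quick virt_to_machine() p2m path for linear-mapped addresses and only falls back to a full lookup_address() walk for vmalloc or fixmap addresses; either way the machine address is the machine frame shifted up by PAGE_SHIFT plus the in-page offset. A tiny sketch of that final composition, with an invented address and mfn:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
	unsigned long address = 0xc2000a48UL;          /* example non-linear vaddr */
	unsigned long long mfn = 0x123456ULL;          /* made-up machine frame number */

	unsigned long offset = address & ~PAGE_MASK;   /* byte offset inside the page */
	unsigned long long maddr = (mfn << PAGE_SHIFT) + offset;

	printf("offset %#lx -> machine address %#llx\n", offset, maddr);
	return 0;
}

The follow-up hunk below switches xen_ptep_modify_prot_commit() over to this helper, presumably because the pte being updated is not guaranteed to sit in the linear map.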
@@ -410,7 +420,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, | |||
410 | 420 | ||
411 | xen_mc_batch(); | 421 | xen_mc_batch(); |
412 | 422 | ||
413 | u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; | 423 | u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; |
414 | u.val = pte_val_ma(pte); | 424 | u.val = pte_val_ma(pte); |
415 | xen_extend_mmu_update(&u); | 425 | xen_extend_mmu_update(&u); |
416 | 426 | ||