Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 621
-rw-r--r--  arch/x86/Kconfig.cpu | 80
-rw-r--r--  arch/x86/Kconfig.debug | 50
-rw-r--r--  arch/x86/Makefile | 43
-rw-r--r--  arch/x86/configs/i386_defconfig | 1
-rw-r--r--  arch/x86/configs/x86_64_defconfig | 1
-rw-r--r--  arch/x86/ia32/ia32_signal.c | 365
-rw-r--r--  arch/x86/ia32/ia32entry.S | 8
-rw-r--r--  arch/x86/include/asm/a.out-core.h | 2
-rw-r--r--  arch/x86/include/asm/acpi.h | 3
-rw-r--r--  arch/x86/include/asm/apic.h | 74
-rw-r--r--  arch/x86/include/asm/apicnum.h | 12
-rw-r--r--  arch/x86/include/asm/apm.h (renamed from arch/x86/include/asm/mach-default/apm.h) | 0
-rw-r--r--  arch/x86/include/asm/bigsmp/apic.h | 155
-rw-r--r--  arch/x86/include/asm/bigsmp/apicdef.h | 13
-rw-r--r--  arch/x86/include/asm/bigsmp/ipi.h | 22
-rw-r--r--  arch/x86/include/asm/calling.h | 56
-rw-r--r--  arch/x86/include/asm/cpu.h | 17
-rw-r--r--  arch/x86/include/asm/cpumask.h | 32
-rw-r--r--  arch/x86/include/asm/current.h | 24
-rw-r--r--  arch/x86/include/asm/do_timer.h (renamed from arch/x86/include/asm/mach-default/do_timer.h) | 0
-rw-r--r--  arch/x86/include/asm/elf.h | 15
-rw-r--r--  arch/x86/include/asm/entry_arch.h (renamed from arch/x86/include/asm/mach-default/entry_arch.h) | 25
-rw-r--r--  arch/x86/include/asm/es7000/apic.h | 242
-rw-r--r--  arch/x86/include/asm/es7000/apicdef.h | 13
-rw-r--r--  arch/x86/include/asm/es7000/ipi.h | 22
-rw-r--r--  arch/x86/include/asm/es7000/mpparse.h | 29
-rw-r--r--  arch/x86/include/asm/es7000/wakecpu.h | 37
-rw-r--r--  arch/x86/include/asm/fixmap_32.h | 4
-rw-r--r--  arch/x86/include/asm/fixmap_64.h | 4
-rw-r--r--  arch/x86/include/asm/genapic.h | 262
-rw-r--r--  arch/x86/include/asm/genapic_32.h | 148
-rw-r--r--  arch/x86/include/asm/genapic_64.h | 66
-rw-r--r--  arch/x86/include/asm/hardirq.h | 49
-rw-r--r--  arch/x86/include/asm/hardirq_32.h | 30
-rw-r--r--  arch/x86/include/asm/hardirq_64.h | 25
-rw-r--r--  arch/x86/include/asm/hw_irq.h | 24
-rw-r--r--  arch/x86/include/asm/io.h | 93
-rw-r--r--  arch/x86/include/asm/io_32.h | 88
-rw-r--r--  arch/x86/include/asm/io_64.h | 61
-rw-r--r--  arch/x86/include/asm/io_apic.h | 41
-rw-r--r--  arch/x86/include/asm/ipi.h | 77
-rw-r--r--  arch/x86/include/asm/irq.h | 4
-rw-r--r--  arch/x86/include/asm/irq_regs.h | 36
-rw-r--r--  arch/x86/include/asm/irq_regs_32.h | 31
-rw-r--r--  arch/x86/include/asm/irq_regs_64.h | 1
-rw-r--r--  arch/x86/include/asm/irq_vectors.h | 209
-rw-r--r--  arch/x86/include/asm/kexec.h | 27
-rw-r--r--  arch/x86/include/asm/mach-default/mach_apic.h | 168
-rw-r--r--  arch/x86/include/asm/mach-default/mach_apicdef.h | 24
-rw-r--r--  arch/x86/include/asm/mach-default/mach_ipi.h | 64
-rw-r--r--  arch/x86/include/asm/mach-default/mach_mpparse.h | 17
-rw-r--r--  arch/x86/include/asm/mach-default/mach_mpspec.h | 12
-rw-r--r--  arch/x86/include/asm/mach-default/mach_wakecpu.h | 41
-rw-r--r--  arch/x86/include/asm/mach-generic/gpio.h | 15
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_apic.h | 35
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_apicdef.h | 11
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_ipi.h | 10
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_mpparse.h | 9
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_mpspec.h | 12
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_wakecpu.h | 12
-rw-r--r--  arch/x86/include/asm/mach-rdc321x/gpio.h | 60
-rw-r--r--  arch/x86/include/asm/mach_timer.h (renamed from arch/x86/include/asm/mach-default/mach_timer.h) | 0
-rw-r--r--  arch/x86/include/asm/mach_traps.h (renamed from arch/x86/include/asm/mach-default/mach_traps.h) | 0
-rw-r--r--  arch/x86/include/asm/mmu_context.h | 63
-rw-r--r--  arch/x86/include/asm/mmu_context_32.h | 55
-rw-r--r--  arch/x86/include/asm/mmu_context_64.h | 54
-rw-r--r--  arch/x86/include/asm/mpspec.h | 35
-rw-r--r--  arch/x86/include/asm/mpspec_def.h | 23
-rw-r--r--  arch/x86/include/asm/numaq.h | 2
-rw-r--r--  arch/x86/include/asm/numaq/apic.h | 142
-rw-r--r--  arch/x86/include/asm/numaq/apicdef.h | 14
-rw-r--r--  arch/x86/include/asm/numaq/ipi.h | 22
-rw-r--r--  arch/x86/include/asm/numaq/mpparse.h | 6
-rw-r--r--  arch/x86/include/asm/numaq/wakecpu.h | 45
-rw-r--r--  arch/x86/include/asm/page.h | 19
-rw-r--r--  arch/x86/include/asm/page_64.h | 4
-rw-r--r--  arch/x86/include/asm/paravirt.h | 463
-rw-r--r--  arch/x86/include/asm/pat.h | 4
-rw-r--r--  arch/x86/include/asm/pci-functions.h (renamed from arch/x86/include/asm/mach-default/pci-functions.h) | 0
-rw-r--r--  arch/x86/include/asm/pda.h | 137
-rw-r--r--  arch/x86/include/asm/percpu.h | 169
-rw-r--r--  arch/x86/include/asm/pgtable-2level.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h | 35
-rw-r--r--  arch/x86/include/asm/pgtable.h | 226
-rw-r--r--  arch/x86/include/asm/pgtable_32.h | 46
-rw-r--r--  arch/x86/include/asm/pgtable_64.h | 65
-rw-r--r--  arch/x86/include/asm/prctl.h | 4
-rw-r--r--  arch/x86/include/asm/processor.h | 28
-rw-r--r--  arch/x86/include/asm/proto.h | 4
-rw-r--r--  arch/x86/include/asm/ptrace.h | 4
-rw-r--r--  arch/x86/include/asm/rdc321x_defs.h (renamed from arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h) | 0
-rw-r--r--  arch/x86/include/asm/segment.h | 9
-rw-r--r--  arch/x86/include/asm/setup.h | 9
-rw-r--r--  arch/x86/include/asm/setup_arch.h (renamed from arch/x86/include/asm/mach-default/setup_arch.h) | 0
-rw-r--r--  arch/x86/include/asm/smp.h | 69
-rw-r--r--  arch/x86/include/asm/smpboot_hooks.h (renamed from arch/x86/include/asm/mach-default/smpboot_hooks.h) | 6
-rw-r--r--  arch/x86/include/asm/spinlock.h | 69
-rw-r--r--  arch/x86/include/asm/stackprotector.h | 124
-rw-r--r--  arch/x86/include/asm/summit/apic.h | 202
-rw-r--r--  arch/x86/include/asm/summit/apicdef.h | 13
-rw-r--r--  arch/x86/include/asm/summit/ipi.h | 26
-rw-r--r--  arch/x86/include/asm/summit/mpparse.h | 109
-rw-r--r--  arch/x86/include/asm/syscalls.h | 20
-rw-r--r--  arch/x86/include/asm/system.h | 67
-rw-r--r--  arch/x86/include/asm/thread_info.h | 21
-rw-r--r--  arch/x86/include/asm/tlbflush.h | 17
-rw-r--r--  arch/x86/include/asm/topology.h | 31
-rw-r--r--  arch/x86/include/asm/trampoline.h | 1
-rw-r--r--  arch/x86/include/asm/traps.h | 2
-rw-r--r--  arch/x86/include/asm/uaccess.h | 138
-rw-r--r--  arch/x86/include/asm/uv/uv.h | 36
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h | 1
-rw-r--r--  arch/x86/include/asm/voyager.h | 42
-rw-r--r--  arch/x86/include/asm/xen/events.h | 6
-rw-r--r--  arch/x86/kernel/Makefile | 25
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 168
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 1
-rw-r--r--  arch/x86/kernel/apic.c | 193
-rw-r--r--  arch/x86/kernel/apm_32.c | 2
-rw-r--r--  arch/x86/kernel/asm-offsets_32.c | 1
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c | 11
-rw-r--r--  arch/x86/kernel/bigsmp_32.c | 266
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c | 54
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/common.c | 251
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 14
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 63
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c | 21
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel_64.c | 1
-rw-r--r--  arch/x86/kernel/crash.c | 4
-rw-r--r--  arch/x86/kernel/dumpstack.c | 2
-rw-r--r--  arch/x86/kernel/dumpstack_64.c | 35
-rw-r--r--  arch/x86/kernel/early_printk.c | 2
-rw-r--r--  arch/x86/kernel/efi.c | 2
-rw-r--r--  arch/x86/kernel/efi_64.c | 1
-rw-r--r--  arch/x86/kernel/entry_32.S | 451
-rw-r--r--  arch/x86/kernel/entry_64.S | 47
-rw-r--r--  arch/x86/kernel/es7000_32.c | 477
-rw-r--r--  arch/x86/kernel/genapic_64.c | 24
-rw-r--r--  arch/x86/kernel/genapic_flat_64.c | 176
-rw-r--r--  arch/x86/kernel/genx2apic_cluster.c | 133
-rw-r--r--  arch/x86/kernel/genx2apic_phys.c | 125
-rw-r--r--  arch/x86/kernel/genx2apic_uv_x.c | 112
-rw-r--r--  arch/x86/kernel/head64.c | 23
-rw-r--r--  arch/x86/kernel/head_32.S | 40
-rw-r--r--  arch/x86/kernel/head_64.S | 19
-rw-r--r--  arch/x86/kernel/io_apic.c | 423
-rw-r--r--  arch/x86/kernel/ioport.c | 3
-rw-r--r--  arch/x86/kernel/ipi.c | 176
-rw-r--r--  arch/x86/kernel/irq.c | 44
-rw-r--r--  arch/x86/kernel/irq_32.c | 33
-rw-r--r--  arch/x86/kernel/irq_64.c | 43
-rw-r--r--  arch/x86/kernel/irqinit_32.c | 23
-rw-r--r--  arch/x86/kernel/kgdb.c | 4
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c | 82
-rw-r--r--  arch/x86/kernel/microcode_intel.c | 10
-rw-r--r--  arch/x86/kernel/module_32.c | 6
-rw-r--r--  arch/x86/kernel/module_64.c | 32
-rw-r--r--  arch/x86/kernel/mpparse.c | 178
-rw-r--r--  arch/x86/kernel/msr.c | 2
-rw-r--r--  arch/x86/kernel/nmi.c | 12
-rw-r--r--  arch/x86/kernel/numaq_32.c | 307
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c | 10
-rw-r--r--  arch/x86/kernel/paravirt.c | 55
-rw-r--r--  arch/x86/kernel/paravirt_patch_32.c | 12
-rw-r--r--  arch/x86/kernel/paravirt_patch_64.c | 15
-rw-r--r--  arch/x86/kernel/probe_32.c | 411
-rw-r--r--  arch/x86/kernel/probe_roms_32.c | 2
-rw-r--r--  arch/x86/kernel/process.c | 2
-rw-r--r--  arch/x86/kernel/process_32.c | 59
-rw-r--r--  arch/x86/kernel/process_64.c | 42
-rw-r--r--  arch/x86/kernel/ptrace.c | 19
-rw-r--r--  arch/x86/kernel/reboot.c | 5
-rw-r--r--  arch/x86/kernel/relocate_kernel_64.S | 125
-rw-r--r--  arch/x86/kernel/setup.c | 32
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 414
-rw-r--r--  arch/x86/kernel/signal.c | 346
-rw-r--r--  arch/x86/kernel/smp.c | 15
-rw-r--r--  arch/x86/kernel/smpboot.c | 117
-rw-r--r--  arch/x86/kernel/smpcommon.c | 30
-rw-r--r--  arch/x86/kernel/stacktrace.c | 2
-rw-r--r--  arch/x86/kernel/summit_32.c | 416
-rw-r--r--  arch/x86/kernel/syscall_table_32.S | 20
-rw-r--r--  arch/x86/kernel/time_32.c | 2
-rw-r--r--  arch/x86/kernel/tlb_32.c | 256
-rw-r--r--  arch/x86/kernel/tlb_uv.c | 72
-rw-r--r--  arch/x86/kernel/trampoline_64.S | 19
-rw-r--r--  arch/x86/kernel/traps.c | 12
-rw-r--r--  arch/x86/kernel/tsc.c | 2
-rw-r--r--  arch/x86/kernel/visws_quirks.c | 6
-rw-r--r--  arch/x86/kernel/vm86_32.c | 20
-rw-r--r--  arch/x86/kernel/vmi_32.c | 9
-rw-r--r--  arch/x86/kernel/vmiclock_32.c | 2
-rw-r--r--  arch/x86/kernel/vmlinux_32.lds.S | 9
-rw-r--r--  arch/x86/kernel/vmlinux_64.lds.S | 35
-rw-r--r--  arch/x86/kernel/vsmp_64.c | 12
-rw-r--r--  arch/x86/kernel/x8664_ksyms_64.c | 2
-rw-r--r--  arch/x86/lguest/boot.c | 15
-rw-r--r--  arch/x86/mach-default/Makefile | 5
-rw-r--r--  arch/x86/mach-default/setup.c | 174
-rw-r--r--  arch/x86/mach-generic/Makefile | 11
-rw-r--r--  arch/x86/mach-generic/bigsmp.c | 60
-rw-r--r--  arch/x86/mach-generic/default.c | 27
-rw-r--r--  arch/x86/mach-generic/es7000.c | 103
-rw-r--r--  arch/x86/mach-generic/numaq.c | 53
-rw-r--r--  arch/x86/mach-generic/probe.c | 152
-rw-r--r--  arch/x86/mach-generic/summit.c | 40
-rw-r--r--  arch/x86/mach-rdc321x/Makefile | 5
-rw-r--r--  arch/x86/mach-rdc321x/gpio.c | 194
-rw-r--r--  arch/x86/mach-rdc321x/platform.c | 69
-rw-r--r--  arch/x86/mach-voyager/setup.c | 1
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 9
-rw-r--r--  arch/x86/math-emu/get_address.c | 6
-rw-r--r--  arch/x86/mm/Makefile | 2
-rw-r--r--  arch/x86/mm/extable.c | 6
-rw-r--r--  arch/x86/mm/fault.c | 447
-rw-r--r--  arch/x86/mm/init_32.c | 135
-rw-r--r--  arch/x86/mm/ioremap.c | 2
-rw-r--r--  arch/x86/mm/mmap.c | 2
-rw-r--r--  arch/x86/mm/numa_64.c | 217
-rw-r--r--  arch/x86/mm/pat.c | 31
-rw-r--r--  arch/x86/mm/srat_64.c | 1
-rw-r--r--  arch/x86/mm/tlb.c (renamed from arch/x86/kernel/tlb_64.c) | 124
-rw-r--r--  arch/x86/pci/numaq_32.c | 6
-rw-r--r--  arch/x86/pci/pcbios.c | 2
-rw-r--r--  arch/x86/vdso/Makefile | 2
-rw-r--r--  arch/x86/xen/Makefile | 3
-rw-r--r--  arch/x86/xen/enlighten.c | 789
-rw-r--r--  arch/x86/xen/irq.c | 39
-rw-r--r--  arch/x86/xen/mmu.c | 753
-rw-r--r--  arch/x86/xen/mmu.h | 3
-rw-r--r--  arch/x86/xen/multicalls.h | 2
-rw-r--r--  arch/x86/xen/smp.c | 41
-rw-r--r--  arch/x86/xen/suspend.c | 1
-rw-r--r--  arch/x86/xen/xen-asm.S | 142
-rw-r--r--  arch/x86/xen/xen-asm.h | 12
-rw-r--r--  arch/x86/xen/xen-asm_32.S | 343
-rw-r--r--  arch/x86/xen/xen-asm_64.S | 252
-rw-r--r--  arch/x86/xen/xen-ops.h | 10
240 files changed, 8576 insertions(+), 8867 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9c39095b33fc..1042d69b267d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -5,7 +5,7 @@ mainmenu "Linux Kernel Configuration for x86"
 config 64BIT
 	bool "64-bit kernel" if ARCH = "x86"
 	default ARCH = "x86_64"
-	help
+	---help---
 	  Say yes to build a 64-bit kernel - formerly known as x86_64
 	  Say no to build a 32-bit kernel - formerly known as i386
 
@@ -34,8 +34,8 @@ config X86
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
-	select HAVE_ARCH_KGDB if !X86_VOYAGER
+	select HAVE_KVM
+	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_GENERIC_DMA_COHERENT if X86_32
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -133,18 +133,16 @@ config ARCH_HAS_CACHE_LINE_SIZE
 	def_bool y
 
 config HAVE_SETUP_PER_CPU_AREA
-	def_bool X86_64_SMP || (X86_SMP && !X86_VOYAGER)
+	def_bool y
 
 config HAVE_CPUMASK_OF_CPU_MAP
 	def_bool X86_64_SMP
 
 config ARCH_HIBERNATION_POSSIBLE
 	def_bool y
-	depends on !SMP || !X86_VOYAGER
 
 config ARCH_SUSPEND_POSSIBLE
 	def_bool y
-	depends on !X86_VOYAGER
 
 config ZONE_DMA32
 	bool
@@ -174,11 +172,6 @@ config GENERIC_PENDING_IRQ
 	depends on GENERIC_HARDIRQS && SMP
 	default y
 
-config X86_SMP
-	bool
-	depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
-	default y
-
 config USE_GENERIC_SMP_HELPERS
 	def_bool y
 	depends on SMP
@@ -194,19 +187,17 @@ config X86_64_SMP
 config X86_HT
 	bool
 	depends on SMP
-	depends on (X86_32 && !X86_VOYAGER) || X86_64
-	default y
-
-config X86_BIOS_REBOOT
-	bool
-	depends on !X86_VOYAGER
 	default y
 
 config X86_TRAMPOLINE
 	bool
-	depends on X86_SMP || (X86_VOYAGER && SMP) || (64BIT && ACPI_SLEEP)
+	depends on SMP || (64BIT && ACPI_SLEEP)
 	default y
 
+config X86_32_LAZY_GS
+	def_bool y
+	depends on X86_32 && !CC_STACKPROTECTOR
+
 config KTIME_SCALAR
 	def_bool X86_32
 source "init/Kconfig"
@@ -244,14 +235,10 @@ config SMP
 
 	  If you don't know what to do here, say N.
 
-config X86_HAS_BOOT_CPU_ID
-	def_bool y
-	depends on X86_VOYAGER
-
 config SPARSE_IRQ
 	bool "Support sparse irq numbering"
 	depends on PCI_MSI || HT_IRQ
-	help
+	---help---
 	  This enables support for sparse irqs. This is useful for distro
 	  kernels that want to define a high CONFIG_NR_CPUS value but still
 	  want to have low kernel memory footprint on smaller machines.
@@ -265,137 +252,154 @@ config NUMA_MIGRATE_IRQ_DESC
265 bool "Move irq desc when changing irq smp_affinity" 252 bool "Move irq desc when changing irq smp_affinity"
266 depends on SPARSE_IRQ && NUMA 253 depends on SPARSE_IRQ && NUMA
267 default n 254 default n
268 help 255 ---help---
269 This enables moving irq_desc to cpu/node that irq will use handled. 256 This enables moving irq_desc to cpu/node that irq will use handled.
270 257
271 If you don't know what to do here, say N. 258 If you don't know what to do here, say N.
272 259
273config X86_FIND_SMP_CONFIG
274 def_bool y
275 depends on X86_MPPARSE || X86_VOYAGER
276
277config X86_MPPARSE 260config X86_MPPARSE
278 bool "Enable MPS table" if ACPI 261 bool "Enable MPS table" if ACPI
279 default y 262 default y
280 depends on X86_LOCAL_APIC 263 depends on X86_LOCAL_APIC
281 help 264 ---help---
282 For old smp systems that do not have proper acpi support. Newer systems 265 For old smp systems that do not have proper acpi support. Newer systems
283 (esp with 64bit cpus) with acpi support, MADT and DSDT will override it 266 (esp with 64bit cpus) with acpi support, MADT and DSDT will override it
284 267
285choice 268config X86_BIGSMP
286 prompt "Subarchitecture Type" 269 bool "Support for big SMP systems with more than 8 CPUs"
287 default X86_PC 270 depends on X86_32 && SMP
271 ---help---
272 This option is needed for the systems that have more than 8 CPUs
288 273
289config X86_PC 274config X86_EXTENDED_PLATFORM
290 bool "PC-compatible" 275 bool "Support for extended (non-PC) x86 platforms"
291 help 276 default y
292 Choose this option if your computer is a standard PC or compatible. 277 ---help---
278 If you disable this option then the kernel will only support
279 standard PC platforms. (which covers the vast majority of
280 systems out there.)
281
282 If you enable this option then you'll be able to select a number
283 of non-PC x86 platforms.
284
285 If you have one of these systems, or if you want to build a
286 generic distribution kernel, say Y here - otherwise say N.
287
288# This is an alphabetically sorted list of 64 bit extended platforms
289# Please maintain the alphabetic order if and when there are additions
290
291config X86_VSMP
292 bool "ScaleMP vSMP"
293 select PARAVIRT
294 depends on X86_64 && PCI
295 depends on X86_EXTENDED_PLATFORM
296 ---help---
297 Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is
298 supposed to run on these EM64T-based machines. Only choose this option
299 if you have one of these machines.
300
301config X86_UV
302 bool "SGI Ultraviolet"
303 depends on X86_64
304 depends on X86_EXTENDED_PLATFORM
305 ---help---
306 This option is needed in order to support SGI Ultraviolet systems.
307 If you don't have one of these, you should say N here.
308
309# Following is an alphabetically sorted list of 32 bit extended platforms
310# Please maintain the alphabetic order if and when there are additions
293 311
294config X86_ELAN 312config X86_ELAN
295 bool "AMD Elan" 313 bool "AMD Elan"
296 depends on X86_32 314 depends on X86_32
297 help 315 depends on X86_EXTENDED_PLATFORM
316 ---help---
298 Select this for an AMD Elan processor. 317 Select this for an AMD Elan processor.
299 318
300 Do not use this option for K6/Athlon/Opteron processors! 319 Do not use this option for K6/Athlon/Opteron processors!
301 320
302 If unsure, choose "PC-compatible" instead. 321 If unsure, choose "PC-compatible" instead.
303 322
304config X86_VOYAGER 323config X86_RDC321X
305 bool "Voyager (NCR)" 324 bool "RDC R-321x SoC"
306 depends on X86_32 && (SMP || BROKEN) && !PCI
307 help
308 Voyager is an MCA-based 32-way capable SMP architecture proprietary
309 to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based.
310
311 *** WARNING ***
312
313 If you do not specifically know you have a Voyager based machine,
314 say N here, otherwise the kernel you build will not be bootable.
315
316config X86_GENERICARCH
317 bool "Generic architecture"
318 depends on X86_32 325 depends on X86_32
319 help 326 depends on X86_EXTENDED_PLATFORM
320 This option compiles in the NUMAQ, Summit, bigsmp, ES7000, default 327 select M486
328 select X86_REBOOTFIXUPS
329 ---help---
330 This option is needed for RDC R-321x system-on-chip, also known
331 as R-8610-(G).
332 If you don't have one of these chips, you should say N here.
333
334config X86_32_NON_STANDARD
335 bool "Support non-standard 32-bit SMP architectures"
336 depends on X86_32 && SMP
337 depends on X86_EXTENDED_PLATFORM
338 ---help---
339 This option compiles in the NUMAQ, Summit, bigsmp, ES7000, default
321 subarchitectures. It is intended for a generic binary kernel. 340 subarchitectures. It is intended for a generic binary kernel.
322 if you select them all, kernel will probe it one by one. and will 341 if you select them all, kernel will probe it one by one. and will
323 fallback to default. 342 fallback to default.
324 343
325if X86_GENERICARCH 344# Alphabetically sorted list of Non standard 32 bit platforms
326 345
327config X86_NUMAQ 346config X86_NUMAQ
328 bool "NUMAQ (IBM/Sequent)" 347 bool "NUMAQ (IBM/Sequent)"
329 depends on SMP && X86_32 && PCI && X86_MPPARSE 348 depends on X86_32_NON_STANDARD
330 select NUMA 349 select NUMA
331 help 350 select X86_MPPARSE
351 ---help---
332 This option is used for getting Linux to run on a NUMAQ (IBM/Sequent) 352 This option is used for getting Linux to run on a NUMAQ (IBM/Sequent)
333 NUMA multiquad box. This changes the way that processors are 353 NUMA multiquad box. This changes the way that processors are
334 bootstrapped, and uses Clustered Logical APIC addressing mode instead 354 bootstrapped, and uses Clustered Logical APIC addressing mode instead
335 of Flat Logical. You will need a new lynxer.elf file to flash your 355 of Flat Logical. You will need a new lynxer.elf file to flash your
336 firmware with - send email to <Martin.Bligh@us.ibm.com>. 356 firmware with - send email to <Martin.Bligh@us.ibm.com>.
337 357
358config X86_VISWS
359 bool "SGI 320/540 (Visual Workstation)"
360 depends on X86_32 && PCI && X86_MPPARSE && PCI_GODIRECT
361 depends on X86_32_NON_STANDARD
362 ---help---
363 The SGI Visual Workstation series is an IA32-based workstation
364 based on SGI systems chips with some legacy PC hardware attached.
365
366 Say Y here to create a kernel to run on the SGI 320 or 540.
367
368 A kernel compiled for the Visual Workstation will run on general
369 PCs as well. See <file:Documentation/sgi-visws.txt> for details.
370
338config X86_SUMMIT 371config X86_SUMMIT
339 bool "Summit/EXA (IBM x440)" 372 bool "Summit/EXA (IBM x440)"
340 depends on X86_32 && SMP 373 depends on X86_32_NON_STANDARD
341 help 374 ---help---
342 This option is needed for IBM systems that use the Summit/EXA chipset. 375 This option is needed for IBM systems that use the Summit/EXA chipset.
343 In particular, it is needed for the x440. 376 In particular, it is needed for the x440.
344 377
345config X86_ES7000 378config X86_ES7000
346 bool "Support for Unisys ES7000 IA32 series" 379 bool "Unisys ES7000 IA32 series"
347 depends on X86_32 && SMP 380 depends on X86_32_NON_STANDARD && X86_BIGSMP
348 help 381 ---help---
349 Support for Unisys ES7000 systems. Say 'Y' here if this kernel is 382 Support for Unisys ES7000 systems. Say 'Y' here if this kernel is
350 supposed to run on an IA32-based Unisys ES7000 system. 383 supposed to run on an IA32-based Unisys ES7000 system.
351 384
352config X86_BIGSMP 385config X86_VOYAGER
353 bool "Support for big SMP systems with more than 8 CPUs" 386 bool "Voyager (NCR)"
354 depends on X86_32 && SMP 387 depends on SMP && !PCI && BROKEN
355 help 388 depends on X86_32_NON_STANDARD
356 This option is needed for the systems that have more than 8 CPUs 389 ---help---
357 and if the system is not of any sub-arch type above. 390 Voyager is an MCA-based 32-way capable SMP architecture proprietary
358 391 to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based.
359endif
360
361config X86_VSMP
362 bool "Support for ScaleMP vSMP"
363 select PARAVIRT
364 depends on X86_64 && PCI
365 help
366 Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is
367 supposed to run on these EM64T-based machines. Only choose this option
368 if you have one of these machines.
369
370endchoice
371
372config X86_VISWS
373 bool "SGI 320/540 (Visual Workstation)"
374 depends on X86_32 && PCI && !X86_VOYAGER && X86_MPPARSE && PCI_GODIRECT
375 help
376 The SGI Visual Workstation series is an IA32-based workstation
377 based on SGI systems chips with some legacy PC hardware attached.
378
379 Say Y here to create a kernel to run on the SGI 320 or 540.
380 392
381 A kernel compiled for the Visual Workstation will run on general 393 *** WARNING ***
382 PCs as well. See <file:Documentation/sgi-visws.txt> for details.
383 394
384config X86_RDC321X 395 If you do not specifically know you have a Voyager based machine,
385 bool "RDC R-321x SoC" 396 say N here, otherwise the kernel you build will not be bootable.
386 depends on X86_32
387 select M486
388 select X86_REBOOTFIXUPS
389 help
390 This option is needed for RDC R-321x system-on-chip, also known
391 as R-8610-(G).
392 If you don't have one of these chips, you should say N here.
393 397
394config SCHED_OMIT_FRAME_POINTER 398config SCHED_OMIT_FRAME_POINTER
395 def_bool y 399 def_bool y
396 prompt "Single-depth WCHAN output" 400 prompt "Single-depth WCHAN output"
397 depends on X86 401 depends on X86
398 help 402 ---help---
399 Calculate simpler /proc/<PID>/wchan values. If this option 403 Calculate simpler /proc/<PID>/wchan values. If this option
400 is disabled then wchan values will recurse back to the 404 is disabled then wchan values will recurse back to the
401 caller function. This provides more accurate wchan values, 405 caller function. This provides more accurate wchan values,
@@ -405,7 +409,7 @@ config SCHED_OMIT_FRAME_POINTER
 
 menuconfig PARAVIRT_GUEST
 	bool "Paravirtualized guest support"
-	help
+	---help---
 	  Say Y here to get to see options related to running Linux under
 	  various hypervisors. This option alone does not add any kernel code.
 
@@ -419,8 +423,7 @@ config VMI
419 bool "VMI Guest support" 423 bool "VMI Guest support"
420 select PARAVIRT 424 select PARAVIRT
421 depends on X86_32 425 depends on X86_32
422 depends on !X86_VOYAGER 426 ---help---
423 help
424 VMI provides a paravirtualized interface to the VMware ESX server 427 VMI provides a paravirtualized interface to the VMware ESX server
425 (it could be used by other hypervisors in theory too, but is not 428 (it could be used by other hypervisors in theory too, but is not
426 at the moment), by linking the kernel to a GPL-ed ROM module 429 at the moment), by linking the kernel to a GPL-ed ROM module
@@ -430,8 +433,7 @@ config KVM_CLOCK
430 bool "KVM paravirtualized clock" 433 bool "KVM paravirtualized clock"
431 select PARAVIRT 434 select PARAVIRT
432 select PARAVIRT_CLOCK 435 select PARAVIRT_CLOCK
433 depends on !X86_VOYAGER 436 ---help---
434 help
435 Turning on this option will allow you to run a paravirtualized clock 437 Turning on this option will allow you to run a paravirtualized clock
436 when running over the KVM hypervisor. Instead of relying on a PIT 438 when running over the KVM hypervisor. Instead of relying on a PIT
437 (or probably other) emulation by the underlying device model, the host 439 (or probably other) emulation by the underlying device model, the host
@@ -441,17 +443,15 @@ config KVM_CLOCK
 config KVM_GUEST
 	bool "KVM Guest support"
 	select PARAVIRT
-	depends on !X86_VOYAGER
-	help
-	  This option enables various optimizations for running under the KVM
-	  hypervisor.
+	---help---
+	  This option enables various optimizations for running under the KVM
+	  hypervisor.
 
 source "arch/x86/lguest/Kconfig"
 
 config PARAVIRT
 	bool "Enable paravirtualization code"
-	depends on !X86_VOYAGER
-	help
+	---help---
 	  This changes the kernel so it can modify itself when it is run
 	  under a hypervisor, potentially improving performance significantly
 	  over full virtualization. However, when run without a hypervisor
@@ -464,51 +464,51 @@ config PARAVIRT_CLOCK
 endif
 
 config PARAVIRT_DEBUG
 	bool "paravirt-ops debugging"
 	depends on PARAVIRT && DEBUG_KERNEL
-	help
+	---help---
 	  Enable to debug paravirt_ops internals. Specifically, BUG if
 	  a paravirt_op is missing when it is called.
 
 config MEMTEST
 	bool "Memtest"
-	help
+	---help---
 	  This option adds a kernel parameter 'memtest', which allows memtest
 	  to be set.
 	        memtest=0, mean disabled; -- default
 	        memtest=1, mean do 1 test pattern;
 	        ...
 	        memtest=4, mean do 4 test patterns.
 	  If you are unsure how to answer this question, answer N.
 
 config X86_SUMMIT_NUMA
 	def_bool y
-	depends on X86_32 && NUMA && X86_GENERICARCH
+	depends on X86_32 && NUMA && X86_32_NON_STANDARD
 
 config X86_CYCLONE_TIMER
 	def_bool y
-	depends on X86_GENERICARCH
+	depends on X86_32_NON_STANDARD
 
 source "arch/x86/Kconfig.cpu"
 
 config HPET_TIMER
 	def_bool X86_64
 	prompt "HPET Timer Support" if X86_32
-	help
+	---help---
 	  Use the IA-PC HPET (High Precision Event Timer) to manage
 	  time in preference to the PIT and RTC, if a HPET is
 	  present.
 	  HPET is the next generation timer replacing legacy 8254s.
 	  The HPET provides a stable time base on SMP
 	  systems, unlike the TSC, but it is more expensive to access,
 	  as it is off-chip. You can find the HPET spec at
 	  <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
 
 	  You can safely choose Y here. However, HPET will only be
 	  activated if the platform and the BIOS support this feature.
 	  Otherwise the 8254 will be used for timing services.
 
 	  Choose N to continue using the legacy 8254 timer.
 
 config HPET_EMULATE_RTC
 	def_bool y
@@ -519,7 +519,7 @@ config HPET_EMULATE_RTC
 config DMI
 	default y
 	bool "Enable DMI scanning" if EMBEDDED
-	help
+	---help---
 	  Enabled scanning of DMI to identify machine quirks. Say Y
 	  here unless you have verified that your setup is not
 	  affected by entries in the DMI blacklist. Required by PNP
@@ -531,7 +531,7 @@ config GART_IOMMU
 	select SWIOTLB
 	select AGP
 	depends on X86_64 && PCI
-	help
+	---help---
 	  Support for full DMA access of devices with 32bit memory access only
 	  on systems with more than 3GB. This is usually needed for USB,
 	  sound, many IDE/SATA chipsets and some other devices.
@@ -546,7 +546,7 @@ config CALGARY_IOMMU
546 bool "IBM Calgary IOMMU support" 546 bool "IBM Calgary IOMMU support"
547 select SWIOTLB 547 select SWIOTLB
548 depends on X86_64 && PCI && EXPERIMENTAL 548 depends on X86_64 && PCI && EXPERIMENTAL
549 help 549 ---help---
550 Support for hardware IOMMUs in IBM's xSeries x366 and x460 550 Support for hardware IOMMUs in IBM's xSeries x366 and x460
551 systems. Needed to run systems with more than 3GB of memory 551 systems. Needed to run systems with more than 3GB of memory
552 properly with 32-bit PCI devices that do not support DAC 552 properly with 32-bit PCI devices that do not support DAC
@@ -564,7 +564,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
 	def_bool y
 	prompt "Should Calgary be enabled by default?"
 	depends on CALGARY_IOMMU
-	help
+	---help---
 	  Should Calgary be enabled by default? if you choose 'y', Calgary
 	  will be used (if it exists). If you choose 'n', Calgary will not be
 	  used even if it exists. If you choose 'n' and would like to use
@@ -576,7 +576,7 @@ config AMD_IOMMU
 	select SWIOTLB
 	select PCI_MSI
 	depends on X86_64 && PCI && ACPI
-	help
+	---help---
 	  With this option you can enable support for AMD IOMMU hardware in
 	  your system. An IOMMU is a hardware component which provides
 	  remapping of DMA memory accesses from devices. With an AMD IOMMU you
@@ -591,7 +591,7 @@ config AMD_IOMMU_STATS
591 bool "Export AMD IOMMU statistics to debugfs" 591 bool "Export AMD IOMMU statistics to debugfs"
592 depends on AMD_IOMMU 592 depends on AMD_IOMMU
593 select DEBUG_FS 593 select DEBUG_FS
594 help 594 ---help---
595 This option enables code in the AMD IOMMU driver to collect various 595 This option enables code in the AMD IOMMU driver to collect various
596 statistics about whats happening in the driver and exports that 596 statistics about whats happening in the driver and exports that
597 information to userspace via debugfs. 597 information to userspace via debugfs.
@@ -600,7 +600,7 @@ config AMD_IOMMU_STATS
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
 	def_bool y if X86_64
-	help
+	---help---
 	  Support for software bounce buffers used on x86-64 systems
 	  which don't have a hardware IOMMU (e.g. the current generation
 	  of Intel's x86-64 CPUs). Using this PCI devices which can only
@@ -618,7 +618,7 @@ config MAXSMP
 	depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
 	select CPUMASK_OFFSTACK
 	default n
-	help
+	---help---
 	  Configure maximum number of CPUS and NUMA Nodes for this architecture.
 	  If unsure, say N.
 
@@ -629,7 +629,7 @@ config NR_CPUS
629 default "4096" if MAXSMP 629 default "4096" if MAXSMP
630 default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000) 630 default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000)
631 default "8" if SMP 631 default "8" if SMP
632 help 632 ---help---
633 This allows you to specify the maximum number of CPUs which this 633 This allows you to specify the maximum number of CPUs which this
634 kernel will support. The maximum supported value is 512 and the 634 kernel will support. The maximum supported value is 512 and the
635 minimum value which makes sense is 2. 635 minimum value which makes sense is 2.
@@ -640,7 +640,7 @@ config NR_CPUS
 config SCHED_SMT
 	bool "SMT (Hyperthreading) scheduler support"
 	depends on X86_HT
-	help
+	---help---
 	  SMT scheduler support improves the CPU scheduler's decision making
 	  when dealing with Intel Pentium 4 chips with HyperThreading at a
 	  cost of slightly increased overhead in some places. If unsure say
@@ -650,7 +650,7 @@ config SCHED_MC
 	def_bool y
 	prompt "Multi-core scheduler support"
 	depends on X86_HT
-	help
+	---help---
 	  Multi-core scheduler support improves the CPU scheduler's decision
 	  making when dealing with multi-core CPU chips at a cost of slightly
 	  increased overhead in some places. If unsure say N here.
@@ -659,8 +659,8 @@ source "kernel/Kconfig.preempt"
 
 config X86_UP_APIC
 	bool "Local APIC support on uniprocessors"
-	depends on X86_32 && !SMP && !(X86_VOYAGER || X86_GENERICARCH)
-	help
+	depends on X86_32 && !SMP && !X86_32_NON_STANDARD
+	---help---
 	  A local APIC (Advanced Programmable Interrupt Controller) is an
 	  integrated interrupt controller in the CPU. If you have a single-CPU
 	  system which has a processor with a local APIC, you can say Y here to
@@ -673,7 +673,7 @@ config X86_UP_APIC
 config X86_UP_IOAPIC
 	bool "IO-APIC support on uniprocessors"
 	depends on X86_UP_APIC
-	help
+	---help---
 	  An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an
 	  SMP-capable replacement for PC-style interrupt controllers. Most
 	  SMP systems and many recent uniprocessor systems have one.
@@ -684,11 +684,11 @@ config X86_UP_IOAPIC
 
 config X86_LOCAL_APIC
 	def_bool y
-	depends on X86_64 || (X86_32 && (X86_UP_APIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH))
+	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
 
 config X86_IO_APIC
 	def_bool y
-	depends on X86_64 || (X86_32 && (X86_UP_IOAPIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH))
+	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
 
 config X86_VISWS_APIC
 	def_bool y
@@ -698,7 +698,7 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
698 bool "Reroute for broken boot IRQs" 698 bool "Reroute for broken boot IRQs"
699 default n 699 default n
700 depends on X86_IO_APIC 700 depends on X86_IO_APIC
701 help 701 ---help---
702 This option enables a workaround that fixes a source of 702 This option enables a workaround that fixes a source of
703 spurious interrupts. This is recommended when threaded 703 spurious interrupts. This is recommended when threaded
704 interrupt handling is used on systems where the generation of 704 interrupt handling is used on systems where the generation of
@@ -720,7 +720,6 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
 
 config X86_MCE
 	bool "Machine Check Exception"
-	depends on !X86_VOYAGER
 	---help---
 	  Machine Check Exception support allows the processor to notify the
 	  kernel if it detects a problem (e.g. overheating, component failure).
@@ -739,7 +738,7 @@ config X86_MCE_INTEL
 	def_bool y
 	prompt "Intel MCE features"
 	depends on X86_64 && X86_MCE && X86_LOCAL_APIC
-	help
+	---help---
 	  Additional support for intel specific MCE features such as
 	  the thermal monitor.
 
@@ -747,14 +746,14 @@ config X86_MCE_AMD
 	def_bool y
 	prompt "AMD MCE features"
 	depends on X86_64 && X86_MCE && X86_LOCAL_APIC
-	help
+	---help---
 	  Additional support for AMD specific MCE features such as
 	  the DRAM Error Threshold.
 
 config X86_MCE_NONFATAL
 	tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
 	depends on X86_32 && X86_MCE
-	help
+	---help---
 	  Enabling this feature starts a timer that triggers every 5 seconds which
 	  will look at the machine check registers to see if anything happened.
 	  Non-fatal problems automatically get corrected (but still logged).
@@ -767,7 +766,7 @@ config X86_MCE_NONFATAL
 config X86_MCE_P4THERMAL
 	bool "check for P4 thermal throttling interrupt."
 	depends on X86_32 && X86_MCE && (X86_UP_APIC || SMP)
-	help
+	---help---
 	  Enabling this feature will cause a message to be printed when the P4
 	  enters thermal throttling.
 
@@ -775,11 +774,11 @@ config VM86
775 bool "Enable VM86 support" if EMBEDDED 774 bool "Enable VM86 support" if EMBEDDED
776 default y 775 default y
777 depends on X86_32 776 depends on X86_32
778 help 777 ---help---
779 This option is required by programs like DOSEMU to run 16-bit legacy 778 This option is required by programs like DOSEMU to run 16-bit legacy
780 code on X86 processors. It also may be needed by software like 779 code on X86 processors. It also may be needed by software like
781 XFree86 to initialize some video cards via BIOS. Disabling this 780 XFree86 to initialize some video cards via BIOS. Disabling this
782 option saves about 6k. 781 option saves about 6k.
783 782
784config TOSHIBA 783config TOSHIBA
785 tristate "Toshiba Laptop support" 784 tristate "Toshiba Laptop support"
@@ -853,33 +852,33 @@ config MICROCODE
 	  module will be called microcode.
 
 config MICROCODE_INTEL
 	bool "Intel microcode patch loading support"
 	depends on MICROCODE
 	default MICROCODE
 	select FW_LOADER
-	--help---
+	---help---
 	  This options enables microcode patch loading support for Intel
 	  processors.
 
 	  For latest news and information on obtaining all the required
 	  Intel ingredients for this driver, check:
 	  <http://www.urbanmyth.org/microcode/>.
 
 config MICROCODE_AMD
 	bool "AMD microcode patch loading support"
 	depends on MICROCODE
 	select FW_LOADER
-	--help---
+	---help---
 	  If you select this option, microcode patch loading support for AMD
 	  processors will be enabled.
 
- config MICROCODE_OLD_INTERFACE
+config MICROCODE_OLD_INTERFACE
 	def_bool y
 	depends on MICROCODE
 
 config X86_MSR
 	tristate "/dev/cpu/*/msr - Model-specific register support"
-	help
+	---help---
 	  This device gives privileged processes access to the x86
 	  Model-Specific Registers (MSRs). It is a character device with
 	  major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
@@ -888,7 +887,7 @@ config X86_MSR
 
 config X86_CPUID
 	tristate "/dev/cpu/*/cpuid - CPU information support"
-	help
+	---help---
 	  This device gives processes access to the x86 CPUID instruction to
 	  be executed on a specific processor. It is a character device
 	  with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
@@ -940,7 +939,7 @@ config NOHIGHMEM
 config HIGHMEM4G
 	bool "4GB"
 	depends on !X86_NUMAQ
-	help
+	---help---
 	  Select this if you have a 32-bit processor and between 1 and 4
 	  gigabytes of physical RAM.
 
@@ -948,7 +947,7 @@ config HIGHMEM64G
948 bool "64GB" 947 bool "64GB"
949 depends on !M386 && !M486 948 depends on !M386 && !M486
950 select X86_PAE 949 select X86_PAE
951 help 950 ---help---
952 Select this if you have a 32-bit processor and more than 4 951 Select this if you have a 32-bit processor and more than 4
953 gigabytes of physical RAM. 952 gigabytes of physical RAM.
954 953
@@ -959,7 +958,7 @@ choice
959 prompt "Memory split" if EMBEDDED 958 prompt "Memory split" if EMBEDDED
960 default VMSPLIT_3G 959 default VMSPLIT_3G
961 depends on X86_32 960 depends on X86_32
962 help 961 ---help---
963 Select the desired split between kernel and user memory. 962 Select the desired split between kernel and user memory.
964 963
965 If the address range available to the kernel is less than the 964 If the address range available to the kernel is less than the
@@ -1005,20 +1004,20 @@ config HIGHMEM
 config X86_PAE
 	bool "PAE (Physical Address Extension) Support"
 	depends on X86_32 && !HIGHMEM4G
-	help
+	---help---
 	  PAE is required for NX support, and furthermore enables
 	  larger swapspace support for non-overcommit purposes. It
 	  has the cost of more pagetable lookup overhead, and also
 	  consumes more pagetable space per process.
 
 config ARCH_PHYS_ADDR_T_64BIT
 	def_bool X86_64 || X86_PAE
 
 config DIRECT_GBPAGES
 	bool "Enable 1GB pages for kernel pagetables" if EMBEDDED
 	default y
 	depends on X86_64
-	help
+	---help---
 	  Allow the kernel linear mapping to use 1GB pages on CPUs that
 	  support it. This can improve the kernel's performance a tiny bit by
 	  reducing TLB pressure. If in doubt, say "Y".
@@ -1028,9 +1027,8 @@ config NUMA
1028 bool "Numa Memory Allocation and Scheduler Support" 1027 bool "Numa Memory Allocation and Scheduler Support"
1029 depends on SMP 1028 depends on SMP
1030 depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL) 1029 depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
1031 default n if X86_PC
1032 default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP) 1030 default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
1033 help 1031 ---help---
1034 Enable NUMA (Non Uniform Memory Access) support. 1032 Enable NUMA (Non Uniform Memory Access) support.
1035 1033
1036 The kernel will try to allocate memory used by a CPU on the 1034 The kernel will try to allocate memory used by a CPU on the
@@ -1053,19 +1051,19 @@ config K8_NUMA
 	def_bool y
 	prompt "Old style AMD Opteron NUMA detection"
 	depends on X86_64 && NUMA && PCI
-	help
+	---help---
 	  Enable K8 NUMA node topology detection. You should say Y here if
 	  you have a multi processor AMD K8 system. This uses an old
 	  method to read the NUMA configuration directly from the builtin
 	  Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
 	  instead, which also takes priority if both are compiled in.
 
 config X86_64_ACPI_NUMA
 	def_bool y
 	prompt "ACPI NUMA detection"
 	depends on X86_64 && NUMA && ACPI && PCI
 	select ACPI_NUMA
-	help
+	---help---
 	  Enable ACPI SRAT based node topology detection.
 
 # Some NUMA nodes have memory ranges that span
@@ -1080,7 +1078,7 @@ config NODES_SPAN_OTHER_NODES
 config NUMA_EMU
 	bool "NUMA emulation"
 	depends on X86_64 && NUMA
-	help
+	---help---
 	  Enable NUMA emulation. A flat machine will be split
 	  into virtual nodes when booted with "numa=fake=N", where N is the
 	  number of nodes. This is only useful for debugging.
@@ -1093,7 +1091,7 @@ config NODES_SHIFT
1093 default "4" if X86_NUMAQ 1091 default "4" if X86_NUMAQ
1094 default "3" 1092 default "3"
1095 depends on NEED_MULTIPLE_NODES 1093 depends on NEED_MULTIPLE_NODES
1096 help 1094 ---help---
1097 Specify the maximum number of NUMA Nodes available on the target 1095 Specify the maximum number of NUMA Nodes available on the target
1098 system. Increases memory reserved to accomodate various tables. 1096 system. Increases memory reserved to accomodate various tables.
1099 1097
@@ -1131,7 +1129,7 @@ config ARCH_SPARSEMEM_DEFAULT
 
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
-	depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) || X86_GENERICARCH
+	depends on X86_64 || NUMA || (EXPERIMENTAL && X86_32) || X86_32_NON_STANDARD
 	select SPARSEMEM_STATIC if X86_32
 	select SPARSEMEM_VMEMMAP_ENABLE if X86_64
 
@@ -1148,61 +1146,61 @@ source "mm/Kconfig"
 config HIGHPTE
 	bool "Allocate 3rd-level pagetables from highmem"
 	depends on X86_32 && (HIGHMEM4G || HIGHMEM64G)
-	help
+	---help---
 	  The VM uses one page table entry for each page of physical memory.
 	  For systems with a lot of RAM, this can be wasteful of precious
 	  low memory. Setting this option will put user-space page table
 	  entries in high memory.
 
 config X86_CHECK_BIOS_CORRUPTION
 	bool "Check for low memory corruption"
-	help
+	---help---
 	  Periodically check for memory corruption in low memory, which
 	  is suspected to be caused by BIOS. Even when enabled in the
 	  configuration, it is disabled at runtime. Enable it by
 	  setting "memory_corruption_check=1" on the kernel command
 	  line. By default it scans the low 64k of memory every 60
 	  seconds; see the memory_corruption_check_size and
 	  memory_corruption_check_period parameters in
 	  Documentation/kernel-parameters.txt to adjust this.
 
 	  When enabled with the default parameters, this option has
 	  almost no overhead, as it reserves a relatively small amount
 	  of memory and scans it infrequently. It both detects corruption
 	  and prevents it from affecting the running system.
 
 	  It is, however, intended as a diagnostic tool; if repeatable
 	  BIOS-originated corruption always affects the same memory,
 	  you can use memmap= to prevent the kernel from using that
 	  memory.
 
 config X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
 	bool "Set the default setting of memory_corruption_check"
 	depends on X86_CHECK_BIOS_CORRUPTION
 	default y
-	help
+	---help---
 	  Set whether the default state of memory_corruption_check is
 	  on or off.
 
 config X86_RESERVE_LOW_64K
 	bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen"
 	default y
-	help
+	---help---
 	  Reserve the first 64K of physical RAM on BIOSes that are known
 	  to potentially corrupt that memory range. A numbers of BIOSes are
 	  known to utilize this area during suspend/resume, so it must not
 	  be used by the kernel.
 
 	  Set this to N if you are absolutely sure that you trust the BIOS
 	  to get all its memory reservations and usages right.
 
 	  If you have doubts about the BIOS (e.g. suspend/resume does not
 	  work or there's kernel crashes after certain hardware hotplug
 	  events) and it's not AMI or Phoenix, then you might want to enable
 	  X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical
 	  corruption patterns.
 
 	  Say Y if unsure.
 
 config MATH_EMULATION
 	bool
@@ -1268,7 +1266,7 @@ config MTRR_SANITIZER
1268 def_bool y 1266 def_bool y
1269 prompt "MTRR cleanup support" 1267 prompt "MTRR cleanup support"
1270 depends on MTRR 1268 depends on MTRR
1271 help 1269 ---help---
1272 Convert MTRR layout from continuous to discrete, so X drivers can 1270 Convert MTRR layout from continuous to discrete, so X drivers can
1273 add writeback entries. 1271 add writeback entries.
1274 1272
@@ -1283,7 +1281,7 @@ config MTRR_SANITIZER_ENABLE_DEFAULT
1283 range 0 1 1281 range 0 1
1284 default "0" 1282 default "0"
1285 depends on MTRR_SANITIZER 1283 depends on MTRR_SANITIZER
1286 help 1284 ---help---
1287 Enable mtrr cleanup default value 1285 Enable mtrr cleanup default value
1288 1286
1289config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT 1287config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
@@ -1291,7 +1289,7 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
1291 range 0 7 1289 range 0 7
1292 default "1" 1290 default "1"
1293 depends on MTRR_SANITIZER 1291 depends on MTRR_SANITIZER
1294 help 1292 ---help---
1295 mtrr cleanup spare entries default, it can be changed via 1293 mtrr cleanup spare entries default, it can be changed via
1296 mtrr_spare_reg_nr=N on the kernel command line. 1294 mtrr_spare_reg_nr=N on the kernel command line.
1297 1295
@@ -1299,7 +1297,7 @@ config X86_PAT
1299 bool 1297 bool
1300 prompt "x86 PAT support" 1298 prompt "x86 PAT support"
1301 depends on MTRR 1299 depends on MTRR
1302 help 1300 ---help---
1303 Use PAT attributes to setup page level cache control. 1301 Use PAT attributes to setup page level cache control.
1304 1302
1305 PATs are the modern equivalents of MTRRs and are much more 1303 PATs are the modern equivalents of MTRRs and are much more
@@ -1314,20 +1312,20 @@ config EFI
1314 bool "EFI runtime service support" 1312 bool "EFI runtime service support"
1315 depends on ACPI 1313 depends on ACPI
1316 ---help--- 1314 ---help---
1317 This enables the kernel to use EFI runtime services that are 1315 This enables the kernel to use EFI runtime services that are
1318 available (such as the EFI variable services). 1316 available (such as the EFI variable services).
1319 1317
1320 This option is only useful on systems that have EFI firmware. 1318 This option is only useful on systems that have EFI firmware.
1321 In addition, you should use the latest ELILO loader available 1319 In addition, you should use the latest ELILO loader available
1322 at <http://elilo.sourceforge.net> in order to take advantage 1320 at <http://elilo.sourceforge.net> in order to take advantage
1323 of EFI runtime services. However, even with this option, the 1321 of EFI runtime services. However, even with this option, the
1324 resultant kernel should continue to boot on existing non-EFI 1322 resultant kernel should continue to boot on existing non-EFI
1325 platforms. 1323 platforms.
1326 1324
1327config SECCOMP 1325config SECCOMP
1328 def_bool y 1326 def_bool y
1329 prompt "Enable seccomp to safely compute untrusted bytecode" 1327 prompt "Enable seccomp to safely compute untrusted bytecode"
1330 help 1328 ---help---
1331 This kernel feature is useful for number crunching applications 1329 This kernel feature is useful for number crunching applications
1332 that may need to compute untrusted bytecode during their 1330 that may need to compute untrusted bytecode during their
1333 execution. By using pipes or other transports made available to 1331 execution. By using pipes or other transports made available to
@@ -1340,13 +1338,16 @@ config SECCOMP
1340 1338
1341 If unsure, say Y. Only embedded should say N here. 1339 If unsure, say Y. Only embedded should say N here.
1342 1340
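[Illustrative note, not part of the patch: a minimal userspace sketch of the
seccomp usage this help text describes -- after the prctl() call below, the
kernel allows the process only read(), write(), _exit() and sigreturn().]

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/prctl.h>
    #include <linux/seccomp.h>

    int main(void)
    {
            /* Enter strict seccomp mode (requires CONFIG_SECCOMP=y). */
            if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0) {
                    perror("prctl(PR_SET_SECCOMP)");
                    return 1;
            }
            /* From here on: only read/write on already-open fds, and _exit. */
            write(1, "sandboxed\n", 10);
            _exit(0);
    }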
+config CC_STACKPROTECTOR_ALL
+	bool
+
 config CC_STACKPROTECTOR
 	bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
-	depends on X86_64 && EXPERIMENTAL && BROKEN
-	help
+	select CC_STACKPROTECTOR_ALL
+	---help---
 	  This option turns on the -fstack-protector GCC feature. This
-	  feature puts, at the beginning of critical functions, a canary
-	  value on the stack just before the return address, and validates
+	  feature puts, at the beginning of functions, a canary value on
+	  the stack just before the return address, and validates
 	  the value just before actually returning. Stack based buffer
 	  overflows (that need to overwrite this return address) now also
 	  overwrite the canary, which gets detected and the attack is then
@@ -1354,22 +1355,14 @@ config CC_STACKPROTECTOR
 
 	  This feature requires gcc version 4.2 or above, or a distribution
 	  gcc with the feature backported. Older versions are automatically
-	  detected and for those versions, this configuration option is ignored.
-
-config CC_STACKPROTECTOR_ALL
-	bool "Use stack-protector for all functions"
-	depends on CC_STACKPROTECTOR
-	help
-	  Normally, GCC only inserts the canary value protection for
-	  functions that use large-ish on-stack buffers. By enabling
-	  this option, GCC will be asked to do this for ALL functions.
+	  detected and for those versions, this configuration option is
+	  ignored. (and a warning is printed during bootup)
 
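[Illustrative note, not part of the patch: the kind of function gcc instruments
under -fstack-protector. A canary is stored below the return address on entry
and re-checked on exit; an overflow of buf corrupts the canary first, so the
exit check calls __stack_chk_fail() instead of returning to an attacker-chosen
address. With -fstack-protector-all (CC_STACKPROTECTOR_ALL) every function is
instrumented, not just ones with buffers like this.]

    #include <string.h>

    /* buf is large enough that -fstack-protector guards this frame. */
    void copy_name(char *dst, const char *untrusted)
    {
            char buf[64];
            strcpy(buf, untrusted);   /* an overflow here hits the canary */
            strcpy(dst, buf);
    }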
 source kernel/Kconfig.hz
 
 config KEXEC
 	bool "kexec system call"
-	depends on X86_BIOS_REBOOT
-	help
+	---help---
 	  kexec is a system call that implements the ability to shut down your
 	  current kernel, and to start another kernel. It is like a reboot
 	  but it is independent of the system firmware. And like a reboot
@@ -1386,7 +1379,7 @@ config KEXEC
 config CRASH_DUMP
 	bool "kernel crash dumps"
 	depends on X86_64 || (X86_32 && HIGHMEM)
-	help
+	---help---
 	  Generate crash dump after being started by kexec.
 	  This should normally only be set in special crash dump kernels
 	  which are loaded in the main kernel with kexec-tools into
@@ -1401,7 +1394,7 @@ config KEXEC_JUMP
 	bool "kexec jump (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
 	depends on KEXEC && HIBERNATION && X86_32
-	help
+	---help---
 	  Jump between original kernel and kexeced kernel and invoke
 	  code in physical address mode via KEXEC
 
@@ -1410,7 +1403,7 @@ config PHYSICAL_START
 	default "0x1000000" if X86_NUMAQ
 	default "0x200000" if X86_64
 	default "0x100000"
-	help
+	---help---
 	  This gives the physical address where the kernel is loaded.
 
 	  If the kernel is not relocatable (CONFIG_RELOCATABLE=n) then
@@ -1451,7 +1444,7 @@ config PHYSICAL_START
 config RELOCATABLE
 	bool "Build a relocatable kernel (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
-	help
+	---help---
 	  This builds a kernel image that retains relocation information
 	  so it can be loaded someplace besides the default 1MB.
 	  The relocations tend to make the kernel binary about 10% larger,
@@ -1471,7 +1464,7 @@ config PHYSICAL_ALIGN
 	default "0x100000" if X86_32
 	default "0x200000" if X86_64
 	range 0x2000 0x400000
-	help
+	---help---
 	  This value puts the alignment restrictions on physical address
 	  where kernel is loaded and run from. Kernel is compiled for an
 	  address which meets above alignment restriction.
@@ -1492,7 +1485,7 @@ config PHYSICAL_ALIGN
 
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs"
-	depends on SMP && HOTPLUG && !X86_VOYAGER
+	depends on SMP && HOTPLUG
 	---help---
 	  Say Y here to allow turning CPUs off and on. CPUs can be
 	  controlled through /sys/devices/system/cpu.
@@ -1504,7 +1497,7 @@ config COMPAT_VDSO
 	def_bool y
 	prompt "Compat VDSO support"
 	depends on X86_32 || IA32_EMULATION
-	help
+	---help---
 	  Map the 32-bit VDSO to the predictable old-style address too.
 	---help---
 	  Say N here if you are running a sufficiently recent glibc
@@ -1516,7 +1509,7 @@ config COMPAT_VDSO
 config CMDLINE_BOOL
 	bool "Built-in kernel command line"
 	default n
-	help
+	---help---
 	  Allow for specifying boot arguments to the kernel at
 	  build time. On some systems (e.g. embedded ones), it is
 	  necessary or convenient to provide some or all of the
@@ -1534,7 +1527,7 @@ config CMDLINE
 	string "Built-in kernel command string"
 	depends on CMDLINE_BOOL
 	default ""
-	help
+	---help---
 	  Enter arguments here that should be compiled into the kernel
 	  image and used at boot time. If the boot loader provides a
 	  command line at boot time, it is appended to this string to
@@ -1551,7 +1544,7 @@ config CMDLINE_OVERRIDE
 	bool "Built-in command line overrides boot loader arguments"
 	default n
 	depends on CMDLINE_BOOL
-	help
+	---help---
 	  Set this option to 'Y' to have the kernel ignore the boot loader
 	  command line, and use ONLY the built-in command line.
 
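[Illustrative note, not part of the patch: a hypothetical .config fragment
wiring the three options above together.]

    CONFIG_CMDLINE_BOOL=y
    CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/sda1 ro"
    # CONFIG_CMDLINE_OVERRIDE is not set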
@@ -1573,7 +1566,6 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
 	depends on NUMA
 
 menu "Power management and ACPI options"
-	depends on !X86_VOYAGER
 
 config ARCH_HIBERNATION_HEADER
 	def_bool y
@@ -1651,7 +1643,7 @@ if APM
 
 config APM_IGNORE_USER_SUSPEND
 	bool "Ignore USER SUSPEND"
-	help
+	---help---
 	  This option will ignore USER SUSPEND requests. On machines with a
 	  compliant APM BIOS, you want to say N. However, on the NEC Versa M
 	  series notebooks, it is necessary to say Y because of a BIOS bug.
@@ -1675,7 +1667,7 @@ config APM_DO_ENABLE
 
 config APM_CPU_IDLE
 	bool "Make CPU Idle calls when idle"
-	help
+	---help---
 	  Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop.
 	  On some machines, this can activate improved power savings, such as
 	  a slowed CPU clock rate, when the machine is idle. These idle calls
@@ -1686,7 +1678,7 @@ config APM_CPU_IDLE
 
 config APM_DISPLAY_BLANK
 	bool "Enable console blanking using APM"
-	help
+	---help---
 	  Enable console blanking using the APM. Some laptops can use this to
 	  turn off the LCD backlight when the screen blanker of the Linux
 	  virtual console blanks the screen. Note that this is only used by
@@ -1699,7 +1691,7 @@ config APM_DISPLAY_BLANK
 
 config APM_ALLOW_INTS
 	bool "Allow interrupts during APM BIOS calls"
-	help
+	---help---
 	  Normally we disable external interrupts while we are making calls to
 	  the APM BIOS as a measure to lessen the effects of a badly behaving
 	  BIOS implementation. The BIOS should reenable interrupts if it
@@ -1724,7 +1716,7 @@ config PCI
 	bool "PCI support"
 	default y
 	select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
-	help
+	---help---
 	  Find out whether you have a PCI motherboard. PCI is the name of a
 	  bus system, i.e. the way the CPU talks to the other stuff inside
 	  your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
@@ -1795,7 +1787,7 @@ config PCI_MMCONFIG
 config DMAR
 	bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
 	depends on X86_64 && PCI_MSI && ACPI && EXPERIMENTAL
-	help
+	---help---
 	  DMA remapping (DMAR) device support enables independent address
 	  translations for Direct Memory Access (DMA) from devices.
 	  These DMA remapping devices are reported via ACPI tables
@@ -1817,29 +1809,29 @@ config DMAR_GFX_WA
 	def_bool y
 	prompt "Support for Graphics workaround"
 	depends on DMAR
-	help
+	---help---
 	  Current Graphics drivers tend to use physical address
 	  for DMA and avoid using DMA APIs. Setting this config
 	  option permits the IOMMU driver to set a unity map for
 	  all the OS-visible memory. Hence the driver can continue
 	  to use physical addresses for DMA.
 
 config DMAR_FLOPPY_WA
 	def_bool y
 	depends on DMAR
-	help
+	---help---
 	  Floppy disk drivers are known to bypass DMA API calls
 	  thereby failing to work when IOMMU is enabled. This
 	  workaround will set up a 1:1 mapping for the first
 	  16M to make floppy (an ISA device) work.
 
 config INTR_REMAP
 	bool "Support for Interrupt Remapping (EXPERIMENTAL)"
 	depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
-	help
+	---help---
 	  Supports Interrupt remapping for IO-APIC and MSI devices.
 	  To use x2apic mode in the CPUs which support x2APIC enhancements or
 	  to support platforms with CPUs having > 8 bit APIC ID, say Y.
 
 source "drivers/pci/pcie/Kconfig"
 
@@ -1853,8 +1845,7 @@ if X86_32
 
 config ISA
 	bool "ISA support"
-	depends on !X86_VOYAGER
-	help
+	---help---
 	  Find out whether you have ISA slots on your motherboard. ISA is the
 	  name of a bus system, i.e. the way the CPU talks to the other stuff
 	  inside your box. Other bus systems are PCI, EISA, MicroChannel
@@ -1880,9 +1871,8 @@ config EISA
 source "drivers/eisa/Kconfig"
 
 config MCA
-	bool "MCA support" if !X86_VOYAGER
-	default y if X86_VOYAGER
-	help
+	bool "MCA support"
+	---help---
 	  MicroChannel Architecture is found in some IBM PS/2 machines and
 	  laptops. It is a bus system similar to PCI or ISA. See
 	  <file:Documentation/mca.txt> (and especially the web page given
@@ -1892,8 +1882,7 @@ source "drivers/mca/Kconfig"
 
 config SCx200
 	tristate "NatSemi SCx200 support"
-	depends on !X86_VOYAGER
-	help
+	---help---
 	  This provides basic support for National Semiconductor's
 	  (now AMD's) Geode processors. The driver probes for the
 	  PCI-IDs of several on-chip devices, so it's a good dependency
@@ -1905,7 +1894,7 @@ config SCx200HR_TIMER
 	tristate "NatSemi SCx200 27MHz High-Resolution Timer Support"
 	depends on SCx200 && GENERIC_TIME
 	default y
-	help
+	---help---
 	  This driver provides a clocksource built upon the on-chip
 	  27MHz high-resolution timer. It's also a workaround for
 	  NSC Geode SC-1100's buggy TSC, which loses time when the
@@ -1916,7 +1905,7 @@ config GEODE_MFGPT_TIMER
 	def_bool y
 	prompt "Geode Multi-Function General Purpose Timer (MFGPT) events"
 	depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS
-	help
+	---help---
 	  This driver provides a clock event source based on the MFGPT
 	  timer(s) in the CS5535 and CS5536 companion chip for the geode.
 	  MFGPTs have a better resolution and max interval than the
@@ -1925,7 +1914,7 @@ config GEODE_MFGPT_TIMER
 config OLPC
 	bool "One Laptop Per Child support"
 	default n
-	help
+	---help---
 	  Add support for detecting the unique features of the OLPC
 	  XO hardware.
 
@@ -1950,16 +1939,16 @@ config IA32_EMULATION
 	bool "IA32 Emulation"
 	depends on X86_64
 	select COMPAT_BINFMT_ELF
-	help
+	---help---
 	  Include code to run 32-bit programs under a 64-bit kernel. You should
 	  likely turn this on, unless you're 100% sure that you don't have any
 	  32-bit programs left.
 
 config IA32_AOUT
 	tristate "IA32 a.out support"
 	depends on IA32_EMULATION
-	help
+	---help---
 	  Support old a.out binaries in the 32bit emulation.
 
 config COMPAT
 	def_bool y
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index c98d52e82966..a95eaf0e582a 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -50,7 +50,7 @@ config M386
 config M486
 	bool "486"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a 486 series processor, either Intel or one of the
 	  compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX,
 	  DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
@@ -59,7 +59,7 @@ config M486
 config M586
 	bool "586/K5/5x86/6x86/6x86MX"
 	depends on X86_32
-	help
+	---help---
 	  Select this for an 586 or 686 series processor such as the AMD K5,
 	  the Cyrix 5x86, 6x86 and 6x86MX. This choice does not
 	  assume the RDTSC (Read Time Stamp Counter) instruction.
@@ -67,21 +67,21 @@ config M586
 config M586TSC
 	bool "Pentium-Classic"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Pentium Classic processor with the RDTSC (Read
 	  Time Stamp Counter) instruction for benchmarking.
 
 config M586MMX
 	bool "Pentium-MMX"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Pentium with the MMX graphics/multimedia
 	  extended instructions.
 
 config M686
 	bool "Pentium-Pro"
 	depends on X86_32
-	help
+	---help---
 	  Select this for Intel Pentium Pro chips. This enables the use of
 	  Pentium Pro extended instructions, and disables the init-time guard
 	  against the f00f bug found in earlier Pentiums.
@@ -89,7 +89,7 @@ config M686
 config MPENTIUMII
 	bool "Pentium-II/Celeron(pre-Coppermine)"
 	depends on X86_32
-	help
+	---help---
 	  Select this for Intel chips based on the Pentium-II and
 	  pre-Coppermine Celeron core. This option enables an unaligned
 	  copy optimization, compiles the kernel with optimization flags
@@ -99,7 +99,7 @@ config MPENTIUMII
 config MPENTIUMIII
 	bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
 	depends on X86_32
-	help
+	---help---
 	  Select this for Intel chips based on the Pentium-III and
 	  Celeron-Coppermine core. This option enables use of some
 	  extended prefetch instructions in addition to the Pentium II
@@ -108,14 +108,14 @@ config MPENTIUMIII
 config MPENTIUMM
 	bool "Pentium M"
 	depends on X86_32
-	help
+	---help---
 	  Select this for Intel Pentium M (not Pentium-4 M)
 	  notebook chips.
 
 config MPENTIUM4
 	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
 	depends on X86_32
-	help
+	---help---
 	  Select this for Intel Pentium 4 chips. This includes the
 	  Pentium 4, Pentium D, P4-based Celeron and Xeon, and
 	  Pentium-4 M (not Pentium M) chips. This option enables compile
@@ -151,7 +151,7 @@ config MPENTIUM4
 config MK6
 	bool "K6/K6-II/K6-III"
 	depends on X86_32
-	help
+	---help---
 	  Select this for an AMD K6-family processor. Enables use of
 	  some extended instructions, and passes appropriate optimization
 	  flags to GCC.
@@ -159,14 +159,14 @@ config MK6
 config MK7
 	bool "Athlon/Duron/K7"
 	depends on X86_32
-	help
+	---help---
 	  Select this for an AMD Athlon K7-family processor. Enables use of
 	  some extended instructions, and passes appropriate optimization
 	  flags to GCC.
 
 config MK8
 	bool "Opteron/Athlon64/Hammer/K8"
-	help
+	---help---
 	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
 	  Enables use of some extended instructions, and passes appropriate
 	  optimization flags to GCC.
@@ -174,7 +174,7 @@ config MK8
 config MCRUSOE
 	bool "Crusoe"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Transmeta Crusoe processor. Treats the processor
 	  like a 586 with TSC, and sets some GCC optimization flags (like a
 	  Pentium Pro with no alignment requirements).
@@ -182,13 +182,13 @@ config MCRUSOE
 config MEFFICEON
 	bool "Efficeon"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Transmeta Efficeon processor.
 
 config MWINCHIPC6
 	bool "Winchip-C6"
 	depends on X86_32
-	help
+	---help---
 	  Select this for an IDT Winchip C6 chip. Linux and GCC
 	  treat this chip as a 586TSC with some extended instructions
 	  and alignment requirements.
@@ -196,7 +196,7 @@ config MWINCHIPC6
 config MWINCHIP3D
 	bool "Winchip-2/Winchip-2A/Winchip-3"
 	depends on X86_32
-	help
+	---help---
 	  Select this for an IDT Winchip-2, 2A or 3. Linux and GCC
 	  treat this chip as a 586TSC with some extended instructions
 	  and alignment requirements. Also enable out of order memory
@@ -206,19 +206,19 @@ config MWINCHIP3D
 config MGEODEGX1
 	bool "GeodeGX1"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Geode GX1 (Cyrix MediaGX) chip.
 
 config MGEODE_LX
 	bool "Geode GX/LX"
 	depends on X86_32
-	help
+	---help---
 	  Select this for AMD Geode GX and LX processors.
 
 config MCYRIXIII
 	bool "CyrixIII/VIA-C3"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Cyrix III or C3 chip. Presently Linux and GCC
 	  treat this chip as a generic 586. Whilst the CPU is 686 class,
 	  it lacks the cmov extension which gcc assumes is present when
@@ -230,7 +230,7 @@ config MCYRIXIII
 config MVIAC3_2
 	bool "VIA C3-2 (Nehemiah)"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a VIA C3 "Nehemiah". Selecting this enables usage
 	  of SSE and tells gcc to treat the CPU as a 686.
 	  Note, this kernel will not boot on older (pre model 9) C3s.
@@ -238,14 +238,14 @@ config MVIAC3_2
 config MVIAC7
 	bool "VIA C7"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a VIA C7. Selecting this uses the correct cache
 	  shift and tells gcc to treat the CPU as a 686.
 
 config MPSC
 	bool "Intel P4 / older Netburst based Xeon"
 	depends on X86_64
-	help
+	---help---
 	  Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
 	  Xeon CPUs with Intel 64bit which is compatible with x86-64.
 	  Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the
@@ -255,7 +255,7 @@ config MPSC
 
 config MCORE2
 	bool "Core 2/newer Xeon"
-	help
+	---help---
 
 	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
 	  53xx) CPUs. You can distinguish newer from older Xeons by the CPU
@@ -265,7 +265,7 @@ config MCORE2
 config GENERIC_CPU
 	bool "Generic-x86-64"
 	depends on X86_64
-	help
+	---help---
 	  Generic x86-64 CPU.
 	  Run equally well on all x86-64 CPUs.
 
@@ -274,7 +274,7 @@ endchoice
 config X86_GENERIC
 	bool "Generic x86 support"
 	depends on X86_32
-	help
+	---help---
 	  Instead of just including optimizations for the selected
 	  x86 variant (e.g. PII, Crusoe or Athlon), include some more
 	  generic optimizations as well. This will make the kernel
@@ -294,25 +294,23 @@ config X86_CPU
 # Define implied options from the CPU selection here
 config X86_L1_CACHE_BYTES
 	int
-	default "128" if GENERIC_CPU || MPSC
-	default "64" if MK8 || MCORE2
-	depends on X86_64
+	default "128" if MPSC
+	default "64" if GENERIC_CPU || MK8 || MCORE2 || X86_32
 
 config X86_INTERNODE_CACHE_BYTES
 	int
 	default "4096" if X86_VSMP
 	default X86_L1_CACHE_BYTES if !X86_VSMP
-	depends on X86_64
 
 config X86_CMPXCHG
 	def_bool X86_64 || (X86_32 && !M386)
 
 config X86_L1_CACHE_SHIFT
 	int
-	default "7" if MPENTIUM4 || X86_GENERIC || GENERIC_CPU || MPSC
+	default "7" if MPENTIUM4 || MPSC
 	default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
-	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7
+	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 || X86_GENERIC || GENERIC_CPU
 
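[Illustrative note, not part of the patch: X86_L1_CACHE_SHIFT is the log2 of
the cache line size, so the defaults above give 1 << 7 = 128-byte lines for
NetBurst (MPENTIUM4/MPSC), 1 << 6 = 64-byte lines for K7/K8/Core 2 and the
generic targets, and 1 << 5 = 32 or 1 << 4 = 16 bytes for the older parts.]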
 config X86_XADD
 	def_bool y
@@ -321,7 +319,7 @@ config X86_XADD
 config X86_PPRO_FENCE
 	bool "PentiumPro memory ordering errata workaround"
 	depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
-	help
+	---help---
 	  Old PentiumPro multiprocessor systems had errata that could cause
 	  memory operations to violate the x86 ordering standard in rare cases.
 	  Enabling this option will attempt to work around some (but not all)
@@ -414,14 +412,14 @@ config X86_DEBUGCTLMSR
 
 menuconfig PROCESSOR_SELECT
 	bool "Supported processor vendors" if EMBEDDED
-	help
+	---help---
 	  This lets you choose what x86 vendor support code your kernel
 	  will include.
 
 config CPU_SUP_INTEL
 	default y
 	bool "Support Intel processors" if PROCESSOR_SELECT
-	help
+	---help---
 	  This enables detection, tunings and quirks for Intel processors
 
 	  You need this enabled if you want your kernel to run on an
@@ -435,7 +433,7 @@ config CPU_SUP_CYRIX_32
 	default y
 	bool "Support Cyrix processors" if PROCESSOR_SELECT
 	depends on !64BIT
-	help
+	---help---
 	  This enables detection, tunings and quirks for Cyrix processors
 
 	  You need this enabled if you want your kernel to run on a
@@ -448,7 +446,7 @@ config CPU_SUP_CYRIX_32
 config CPU_SUP_AMD
 	default y
 	bool "Support AMD processors" if PROCESSOR_SELECT
-	help
+	---help---
 	  This enables detection, tunings and quirks for AMD processors
 
 	  You need this enabled if you want your kernel to run on an
@@ -462,7 +460,7 @@ config CPU_SUP_CENTAUR_32
 	default y
 	bool "Support Centaur processors" if PROCESSOR_SELECT
 	depends on !64BIT
-	help
+	---help---
 	  This enables detection, tunings and quirks for Centaur processors
 
 	  You need this enabled if you want your kernel to run on a
@@ -476,7 +474,7 @@ config CPU_SUP_CENTAUR_64
 	default y
 	bool "Support Centaur processors" if PROCESSOR_SELECT
 	depends on 64BIT
-	help
+	---help---
 	  This enables detection, tunings and quirks for Centaur processors
 
 	  You need this enabled if you want your kernel to run on a
@@ -490,7 +488,7 @@ config CPU_SUP_TRANSMETA_32
 	default y
 	bool "Support Transmeta processors" if PROCESSOR_SELECT
 	depends on !64BIT
-	help
+	---help---
 	  This enables detection, tunings and quirks for Transmeta processors
 
 	  You need this enabled if you want your kernel to run on a
@@ -504,7 +502,7 @@ config CPU_SUP_UMC_32
 	default y
 	bool "Support UMC processors" if PROCESSOR_SELECT
 	depends on !64BIT
-	help
+	---help---
 	  This enables detection, tunings and quirks for UMC processors
 
 	  You need this enabled if you want your kernel to run on a
@@ -523,7 +521,7 @@ config X86_PTRACE_BTS
 	bool "Branch Trace Store"
 	default y
 	depends on X86_DEBUGCTLMSR
-	help
+	---help---
 	  This adds a ptrace interface to the hardware's branch trace store.
 
 	  Debuggers may use it to collect an execution trace of the debugged
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 10d6cc3fd052..ba4781b93890 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -7,7 +7,7 @@ source "lib/Kconfig.debug"
 
 config STRICT_DEVMEM
 	bool "Filter access to /dev/mem"
-	help
+	---help---
 	  If this option is disabled, you allow userspace (root) access to all
 	  of memory, including kernel and userspace memory. Accidental
 	  access to this is obviously disastrous, but specific access can
@@ -25,7 +25,7 @@ config STRICT_DEVMEM
 config X86_VERBOSE_BOOTUP
 	bool "Enable verbose x86 bootup info messages"
 	default y
-	help
+	---help---
 	  Enables the informational output from the decompression stage
 	  (e.g. bzImage) of the boot. If you disable this you will still
 	  see errors. Disable this if you want silent bootup.
@@ -33,7 +33,7 @@ config X86_VERBOSE_BOOTUP
 config EARLY_PRINTK
 	bool "Early printk" if EMBEDDED
 	default y
-	help
+	---help---
 	  Write kernel log output directly into the VGA buffer or to a serial
 	  port.
 
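[Illustrative note, not part of the patch: the output target is chosen on the
boot command line, e.g. earlyprintk=vga, earlyprintk=serial,ttyS0,115200 or,
with the option below, earlyprintk=dbgp.]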
@@ -47,7 +47,7 @@ config EARLY_PRINTK_DBGP
 	bool "Early printk via EHCI debug port"
 	default n
 	depends on EARLY_PRINTK && PCI
-	help
+	---help---
 	  Write kernel log output directly into the EHCI debug port.
 
 	  This is useful for kernel debugging when your machine crashes very
@@ -59,14 +59,14 @@ config EARLY_PRINTK_DBGP
 config DEBUG_STACKOVERFLOW
 	bool "Check for stack overflows"
 	depends on DEBUG_KERNEL
-	help
+	---help---
 	  This option will cause messages to be printed if free stack space
 	  drops below a certain limit.
 
 config DEBUG_STACK_USAGE
 	bool "Stack utilization instrumentation"
 	depends on DEBUG_KERNEL
-	help
+	---help---
 	  Enables the display of the minimum amount of free stack which each
 	  task has ever had available in the sysrq-T and sysrq-P debug output.
 
@@ -75,7 +75,7 @@ config DEBUG_STACK_USAGE
 config DEBUG_PAGEALLOC
 	bool "Debug page memory allocations"
 	depends on DEBUG_KERNEL
-	help
+	---help---
 	  Unmap pages from the kernel linear mapping after free_pages().
 	  This results in a large slowdown, but helps to find certain types
 	  of memory corruptions.
@@ -83,9 +83,9 @@ config DEBUG_PAGEALLOC
 config DEBUG_PER_CPU_MAPS
 	bool "Debug access to per_cpu maps"
 	depends on DEBUG_KERNEL
-	depends on X86_SMP
+	depends on SMP
 	default n
-	help
+	---help---
 	  Say Y to verify that the per_cpu map being accessed has
 	  been setup. Adds a fair amount of code to kernel memory
 	  and decreases performance.
@@ -96,7 +96,7 @@ config X86_PTDUMP
 	bool "Export kernel pagetable layout to userspace via debugfs"
 	depends on DEBUG_KERNEL
 	select DEBUG_FS
-	help
+	---help---
 	  Say Y here if you want to show the kernel pagetable layout in a
 	  debugfs file. This information is only useful for kernel developers
 	  who are working in architecture specific areas of the kernel.
@@ -108,7 +108,7 @@ config DEBUG_RODATA
 	bool "Write protect kernel read-only data structures"
 	default y
 	depends on DEBUG_KERNEL
-	help
+	---help---
 	  Mark the kernel read-only data as write-protected in the pagetables,
 	  in order to catch accidental (and incorrect) writes to such const
 	  data. This is recommended so that we can catch kernel bugs sooner.
@@ -117,7 +117,8 @@ config DEBUG_RODATA
 config DEBUG_RODATA_TEST
 	bool "Testcase for the DEBUG_RODATA feature"
 	depends on DEBUG_RODATA
-	help
+	default y
+	---help---
 	  This option enables a testcase for the DEBUG_RODATA
 	  feature as well as for the change_page_attr() infrastructure.
 	  If in doubt, say "N"
@@ -125,7 +126,7 @@ config DEBUG_RODATA_TEST
 config DEBUG_NX_TEST
 	tristate "Testcase for the NX non-executable stack feature"
 	depends on DEBUG_KERNEL && m
-	help
+	---help---
 	  This option enables a testcase for the CPU NX capability
 	  and the software setup of this feature.
 	  If in doubt, say "N"
@@ -133,7 +134,7 @@ config DEBUG_NX_TEST
 config 4KSTACKS
 	bool "Use 4Kb for kernel stacks instead of 8Kb"
 	depends on X86_32
-	help
+	---help---
 	  If you say Y here the kernel will use a 4Kb stacksize for the
 	  kernel stack attached to each process/thread. This facilitates
 	  running more threads on a system and also reduces the pressure
@@ -144,7 +145,7 @@ config DOUBLEFAULT
 	default y
 	bool "Enable doublefault exception handler" if EMBEDDED
 	depends on X86_32
-	help
+	---help---
 	  This option allows trapping of rare doublefault exceptions that
 	  would otherwise cause a system to silently reboot. Disabling this
 	  option saves about 4k and might cause you much additional grey
@@ -154,7 +155,7 @@ config IOMMU_DEBUG
 	bool "Enable IOMMU debugging"
 	depends on GART_IOMMU && DEBUG_KERNEL
 	depends on X86_64
-	help
+	---help---
 	  Force the IOMMU to on even when you have less than 4GB of
 	  memory and add debugging code. On overflow always panic. And
 	  allow to enable IOMMU leak tracing. Can be disabled at boot
@@ -170,7 +171,7 @@ config IOMMU_LEAK
 	bool "IOMMU leak tracing"
 	depends on DEBUG_KERNEL
 	depends on IOMMU_DEBUG
-	help
+	---help---
 	  Add a simple leak tracer to the IOMMU code. This is useful when you
 	  are debugging a buggy device driver that leaks IOMMU mappings.
 
@@ -223,25 +224,25 @@ choice
 
 config IO_DELAY_0X80
 	bool "port 0x80 based port-IO delay [recommended]"
-	help
+	---help---
 	  This is the traditional Linux IO delay used for in/out_p.
 	  It is the most tested hence safest selection here.
 
 config IO_DELAY_0XED
 	bool "port 0xed based port-IO delay"
-	help
+	---help---
 	  Use port 0xed as the IO delay. This frees up port 0x80 which is
 	  often used as a hardware-debug port.
 
 config IO_DELAY_UDELAY
 	bool "udelay based port-IO delay"
-	help
+	---help---
 	  Use udelay(2) as the IO delay method. This provides the delay
 	  while not having any side-effect on the IO port space.
 
 config IO_DELAY_NONE
 	bool "no port-IO delay"
-	help
+	---help---
 	  No port-IO delay. Will break on old boxes that require port-IO
 	  delay for certain operations. Should work on most new machines.
 
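[Illustrative note, not part of the patch: whichever choice is compiled in can
still be overridden at boot with io_delay=0x80, io_delay=0xed, io_delay=udelay
or io_delay=none, matching the four options above.]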
@@ -275,18 +276,18 @@ config DEBUG_BOOT_PARAMS
275 bool "Debug boot parameters" 276 bool "Debug boot parameters"
276 depends on DEBUG_KERNEL 277 depends on DEBUG_KERNEL
277 depends on DEBUG_FS 278 depends on DEBUG_FS
278 help 279 ---help---
279 This option will cause struct boot_params to be exported via debugfs. 280 This option will cause struct boot_params to be exported via debugfs.
280 281
281config CPA_DEBUG 282config CPA_DEBUG
282 bool "CPA self-test code" 283 bool "CPA self-test code"
283 depends on DEBUG_KERNEL 284 depends on DEBUG_KERNEL
284 help 285 ---help---
285 Do change_page_attr() self-tests every 30 seconds. 286 Do change_page_attr() self-tests every 30 seconds.
286 287
287config OPTIMIZE_INLINING 288config OPTIMIZE_INLINING
288 bool "Allow gcc to uninline functions marked 'inline'" 289 bool "Allow gcc to uninline functions marked 'inline'"
289 help 290 ---help---
290 This option determines if the kernel forces gcc to inline the functions 291 This option determines if the kernel forces gcc to inline the functions
291 developers have marked 'inline'. Doing so takes away freedom from gcc to 292 developers have marked 'inline'. Doing so takes away freedom from gcc to
292 do what it thinks is best, which is desirable for the gcc 3.x series of 293 do what it thinks is best, which is desirable for the gcc 3.x series of
@@ -299,4 +300,3 @@ config OPTIMIZE_INLINING
299 If unsure, say N. 300 If unsure, say N.
300 301
301endmenu 302endmenu
302
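
As the OPTIMIZE_INLINING help text above says, the option controls whether 'inline' is an order or merely a hint to gcc. A small illustrative user-space sketch of the distinction (function names hypothetical):

/* inline_hint.c - 'inline' as a hint vs. a forced inline.
 * With OPTIMIZE_INLINING=y the kernel leaves 'inline' as the plain
 * hint below, so gcc may still emit an out-of-line copy. */
#include <stdio.h>

static inline int hint_add(int a, int b)        /* gcc may uninline */
{
        return a + b;
}

static inline __attribute__((always_inline))
int forced_add(int a, int b)                    /* gcc must inline */
{
        return a + b;
}

int main(void)
{
        printf("%d %d\n", hint_add(1, 2), forced_add(3, 4));
        return 0;
}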
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index d1a47adb5aec..1836191839ee 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -70,14 +70,17 @@ else
70 # this works around some issues with generating unwind tables in older gccs 70 # this works around some issues with generating unwind tables in older gccs
71 # newer gccs do it by default 71 # newer gccs do it by default
72 KBUILD_CFLAGS += -maccumulate-outgoing-args 72 KBUILD_CFLAGS += -maccumulate-outgoing-args
73endif
73 74
74 stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh 75ifdef CONFIG_CC_STACKPROTECTOR
75 stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \ 76 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
76 "$(CC)" -fstack-protector ) 77 ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC)),y)
77 stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \ 78 stackp-y := -fstack-protector
78 "$(CC)" -fstack-protector-all ) 79 stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += -fstack-protector-all
79 80 KBUILD_CFLAGS += $(stackp-y)
80 KBUILD_CFLAGS += $(stackp-y) 81 else
82 $(warning stack protector enabled but no compiler support)
83 endif
81endif 84endif
82 85
83# Stackpointer is addressed different for 32 bit and 64 bit x86 86# Stackpointer is addressed different for 32 bit and 64 bit x86
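
The reworked block above probes the compiler once via gcc-x86_$(BITS)-has-stack-protector.sh and warns instead of silently building without canaries. What -fstack-protector actually adds to a function, as a compile-and-compare sketch:

/* canary_demo.c - build twice and diff the assembly:
 *   gcc -S -fno-stack-protector canary_demo.c -o plain.s
 *   gcc -S -fstack-protector    canary_demo.c -o guarded.s
 * guarded.s gains a canary load from the %gs/%fs segment in the
 * prologue and a __stack_chk_fail check in the epilogue. */
#include <string.h>

void copy_name(const char *src)
{
        char buf[16];           /* array local => canary inserted */

        strncpy(buf, src, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';
}

int main(int argc, char **argv)
{
        copy_name(argc > 1 ? argv[1] : "test");
        return 0;
}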
@@ -102,29 +105,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
102# prevent gcc from generating any FP code by mistake 105# prevent gcc from generating any FP code by mistake
103KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) 106KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
104 107
105###
106# Sub architecture support
107# fcore-y is linked before mcore-y files.
108
109# Default subarch .c files
110mcore-y := arch/x86/mach-default/
111
112# Voyager subarch support
113mflags-$(CONFIG_X86_VOYAGER) := -Iarch/x86/include/asm/mach-voyager
114mcore-$(CONFIG_X86_VOYAGER) := arch/x86/mach-voyager/
115
116# generic subarchitecture
117mflags-$(CONFIG_X86_GENERICARCH):= -Iarch/x86/include/asm/mach-generic
118fcore-$(CONFIG_X86_GENERICARCH) += arch/x86/mach-generic/
119mcore-$(CONFIG_X86_GENERICARCH) := arch/x86/mach-default/
120
121# default subarch .h files
122mflags-y += -Iarch/x86/include/asm/mach-default
123
124# 64 bit does not support subarch support - clear sub arch variables
125fcore-$(CONFIG_X86_64) :=
126mcore-$(CONFIG_X86_64) :=
127
128KBUILD_CFLAGS += $(mflags-y) 108KBUILD_CFLAGS += $(mflags-y)
129KBUILD_AFLAGS += $(mflags-y) 109KBUILD_AFLAGS += $(mflags-y)
130 110
@@ -150,9 +130,6 @@ core-$(CONFIG_LGUEST_GUEST) += arch/x86/lguest/
150core-y += arch/x86/kernel/ 130core-y += arch/x86/kernel/
151core-y += arch/x86/mm/ 131core-y += arch/x86/mm/
152 132
153# Remaining sub architecture files
154core-y += $(mcore-y)
155
156core-y += arch/x86/crypto/ 133core-y += arch/x86/crypto/
157core-y += arch/x86/vdso/ 134core-y += arch/x86/vdso/
158core-$(CONFIG_IA32_EMULATION) += arch/x86/ia32/ 135core-$(CONFIG_IA32_EMULATION) += arch/x86/ia32/
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 8f40a80ccb55..096dd5359cd9 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -196,7 +196,6 @@ CONFIG_SMP=y
196CONFIG_SPARSE_IRQ=y 196CONFIG_SPARSE_IRQ=y
197CONFIG_X86_FIND_SMP_CONFIG=y 197CONFIG_X86_FIND_SMP_CONFIG=y
198CONFIG_X86_MPPARSE=y 198CONFIG_X86_MPPARSE=y
199CONFIG_X86_PC=y
200# CONFIG_X86_ELAN is not set 199# CONFIG_X86_ELAN is not set
201# CONFIG_X86_VOYAGER is not set 200# CONFIG_X86_VOYAGER is not set
202# CONFIG_X86_GENERICARCH is not set 201# CONFIG_X86_GENERICARCH is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 10728fd91c63..2efb5d5063ff 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -198,7 +198,6 @@ CONFIG_SPARSE_IRQ=y
198# CONFIG_NUMA_MIGRATE_IRQ_DESC is not set 198# CONFIG_NUMA_MIGRATE_IRQ_DESC is not set
199CONFIG_X86_FIND_SMP_CONFIG=y 199CONFIG_X86_FIND_SMP_CONFIG=y
200CONFIG_X86_MPPARSE=y 200CONFIG_X86_MPPARSE=y
201CONFIG_X86_PC=y
202# CONFIG_X86_ELAN is not set 201# CONFIG_X86_ELAN is not set
203# CONFIG_X86_VOYAGER is not set 202# CONFIG_X86_VOYAGER is not set
204# CONFIG_X86_GENERICARCH is not set 203# CONFIG_X86_GENERICARCH is not set
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 9dabd00e9805..dd77ac0cac46 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -46,78 +46,83 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
46 46
47int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) 47int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
48{ 48{
49 int err; 49 int err = 0;
50 50
51 if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) 51 if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
52 return -EFAULT; 52 return -EFAULT;
53 53
54 /* If you change siginfo_t structure, please make sure that 54 put_user_try {
55 this code is fixed accordingly. 55 /* If you change siginfo_t structure, please make sure that
56 It should never copy any pad contained in the structure 56 this code is fixed accordingly.
57 to avoid security leaks, but must copy the generic 57 It should never copy any pad contained in the structure
58 3 ints plus the relevant union member. */ 58 to avoid security leaks, but must copy the generic
59 err = __put_user(from->si_signo, &to->si_signo); 59 3 ints plus the relevant union member. */
60 err |= __put_user(from->si_errno, &to->si_errno); 60 put_user_ex(from->si_signo, &to->si_signo);
61 err |= __put_user((short)from->si_code, &to->si_code); 61 put_user_ex(from->si_errno, &to->si_errno);
62 62 put_user_ex((short)from->si_code, &to->si_code);
63 if (from->si_code < 0) { 63
64 err |= __put_user(from->si_pid, &to->si_pid); 64 if (from->si_code < 0) {
65 err |= __put_user(from->si_uid, &to->si_uid); 65 put_user_ex(from->si_pid, &to->si_pid);
66 err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr); 66 put_user_ex(from->si_uid, &to->si_uid);
67 } else { 67 put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
68 /* 68 } else {
69 * First 32bits of unions are always present: 69 /*
70 * si_pid === si_band === si_tid === si_addr(LS half) 70 * First 32bits of unions are always present:
71 */ 71 * si_pid === si_band === si_tid === si_addr(LS half)
72 err |= __put_user(from->_sifields._pad[0], 72 */
73 &to->_sifields._pad[0]); 73 put_user_ex(from->_sifields._pad[0],
74 switch (from->si_code >> 16) { 74 &to->_sifields._pad[0]);
75 case __SI_FAULT >> 16: 75 switch (from->si_code >> 16) {
76 break; 76 case __SI_FAULT >> 16:
77 case __SI_CHLD >> 16: 77 break;
78 err |= __put_user(from->si_utime, &to->si_utime); 78 case __SI_CHLD >> 16:
79 err |= __put_user(from->si_stime, &to->si_stime); 79 put_user_ex(from->si_utime, &to->si_utime);
80 err |= __put_user(from->si_status, &to->si_status); 80 put_user_ex(from->si_stime, &to->si_stime);
81 /* FALL THROUGH */ 81 put_user_ex(from->si_status, &to->si_status);
82 default: 82 /* FALL THROUGH */
83 case __SI_KILL >> 16: 83 default:
84 err |= __put_user(from->si_uid, &to->si_uid); 84 case __SI_KILL >> 16:
85 break; 85 put_user_ex(from->si_uid, &to->si_uid);
86 case __SI_POLL >> 16: 86 break;
87 err |= __put_user(from->si_fd, &to->si_fd); 87 case __SI_POLL >> 16:
88 break; 88 put_user_ex(from->si_fd, &to->si_fd);
89 case __SI_TIMER >> 16: 89 break;
90 err |= __put_user(from->si_overrun, &to->si_overrun); 90 case __SI_TIMER >> 16:
91 err |= __put_user(ptr_to_compat(from->si_ptr), 91 put_user_ex(from->si_overrun, &to->si_overrun);
92 &to->si_ptr); 92 put_user_ex(ptr_to_compat(from->si_ptr),
93 break; 93 &to->si_ptr);
94 /* This is not generated by the kernel as of now. */ 94 break;
95 case __SI_RT >> 16: 95 /* This is not generated by the kernel as of now. */
96 case __SI_MESGQ >> 16: 96 case __SI_RT >> 16:
97 err |= __put_user(from->si_uid, &to->si_uid); 97 case __SI_MESGQ >> 16:
98 err |= __put_user(from->si_int, &to->si_int); 98 put_user_ex(from->si_uid, &to->si_uid);
99 break; 99 put_user_ex(from->si_int, &to->si_int);
100 break;
101 }
100 } 102 }
101 } 103 } put_user_catch(err);
104
102 return err; 105 return err;
103} 106}
104 107
105int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) 108int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
106{ 109{
107 int err; 110 int err = 0;
108 u32 ptr32; 111 u32 ptr32;
109 112
110 if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t))) 113 if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
111 return -EFAULT; 114 return -EFAULT;
112 115
113 err = __get_user(to->si_signo, &from->si_signo); 116 get_user_try {
114 err |= __get_user(to->si_errno, &from->si_errno); 117 get_user_ex(to->si_signo, &from->si_signo);
115 err |= __get_user(to->si_code, &from->si_code); 118 get_user_ex(to->si_errno, &from->si_errno);
119 get_user_ex(to->si_code, &from->si_code);
116 120
117 err |= __get_user(to->si_pid, &from->si_pid); 121 get_user_ex(to->si_pid, &from->si_pid);
118 err |= __get_user(to->si_uid, &from->si_uid); 122 get_user_ex(to->si_uid, &from->si_uid);
119 err |= __get_user(ptr32, &from->si_ptr); 123 get_user_ex(ptr32, &from->si_ptr);
120 to->si_ptr = compat_ptr(ptr32); 124 to->si_ptr = compat_ptr(ptr32);
125 } get_user_catch(err);
121 126
122 return err; 127 return err;
123} 128}
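
The conversion above replaces chains of "err |= __put_user(...)" with the put_user_try/put_user_ex block this series introduces: a faulting store branches via the exception table straight to the catch clause, so the common path drops all per-field error accumulation. A condensed sketch of the pattern, using the put_user_try/put_user_ex/put_user_catch macros from <asm/uaccess.h> on a hypothetical struct (kernel-only code, not a user-space example):

/* Sketch of the write-side pattern used above: */
int copy_two_fields(struct foo __user *to, struct foo *from)
{
        int err = 0;

        if (!access_ok(VERIFY_WRITE, to, sizeof(*to)))
                return -EFAULT;

        put_user_try {
                /* no per-store error checks: a fault jumps through
                 * the __ex_table fixup directly to put_user_catch() */
                put_user_ex(from->a, &to->a);
                put_user_ex(from->b, &to->b);
        } put_user_catch(err);

        return err;             /* 0 on success, -EFAULT on fault */
}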
@@ -142,17 +147,23 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
142 struct pt_regs *regs) 147 struct pt_regs *regs)
143{ 148{
144 stack_t uss, uoss; 149 stack_t uss, uoss;
145 int ret; 150 int ret, err = 0;
146 mm_segment_t seg; 151 mm_segment_t seg;
147 152
148 if (uss_ptr) { 153 if (uss_ptr) {
149 u32 ptr; 154 u32 ptr;
150 155
151 memset(&uss, 0, sizeof(stack_t)); 156 memset(&uss, 0, sizeof(stack_t));
152 if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)) || 157 if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)))
153 __get_user(ptr, &uss_ptr->ss_sp) || 158 return -EFAULT;
154 __get_user(uss.ss_flags, &uss_ptr->ss_flags) || 159
155 __get_user(uss.ss_size, &uss_ptr->ss_size)) 160 get_user_try {
161 get_user_ex(ptr, &uss_ptr->ss_sp);
162 get_user_ex(uss.ss_flags, &uss_ptr->ss_flags);
163 get_user_ex(uss.ss_size, &uss_ptr->ss_size);
164 } get_user_catch(err);
165
166 if (err)
156 return -EFAULT; 167 return -EFAULT;
157 uss.ss_sp = compat_ptr(ptr); 168 uss.ss_sp = compat_ptr(ptr);
158 } 169 }
@@ -161,10 +172,16 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
161 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp); 172 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
162 set_fs(seg); 173 set_fs(seg);
163 if (ret >= 0 && uoss_ptr) { 174 if (ret >= 0 && uoss_ptr) {
164 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)) || 175 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
165 __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) || 176 return -EFAULT;
166 __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) || 177
167 __put_user(uoss.ss_size, &uoss_ptr->ss_size)) 178 put_user_try {
179 put_user_ex(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp);
180 put_user_ex(uoss.ss_flags, &uoss_ptr->ss_flags);
181 put_user_ex(uoss.ss_size, &uoss_ptr->ss_size);
182 } put_user_catch(err);
183
184 if (err)
168 ret = -EFAULT; 185 ret = -EFAULT;
169 } 186 }
170 return ret; 187 return ret;
@@ -174,18 +191,18 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
174 * Do a signal return; undo the signal stack. 191 * Do a signal return; undo the signal stack.
175 */ 192 */
176#define COPY(x) { \ 193#define COPY(x) { \
177 err |= __get_user(regs->x, &sc->x); \ 194 get_user_ex(regs->x, &sc->x); \
178} 195}
179 196
180#define COPY_SEG_CPL3(seg) { \ 197#define COPY_SEG_CPL3(seg) { \
181 unsigned short tmp; \ 198 unsigned short tmp; \
182 err |= __get_user(tmp, &sc->seg); \ 199 get_user_ex(tmp, &sc->seg); \
183 regs->seg = tmp | 3; \ 200 regs->seg = tmp | 3; \
184} 201}
185 202
186#define RELOAD_SEG(seg) { \ 203#define RELOAD_SEG(seg) { \
187 unsigned int cur, pre; \ 204 unsigned int cur, pre; \
188 err |= __get_user(pre, &sc->seg); \ 205 get_user_ex(pre, &sc->seg); \
189 savesegment(seg, cur); \ 206 savesegment(seg, cur); \
190 pre |= 3; \ 207 pre |= 3; \
191 if (pre != cur) \ 208 if (pre != cur) \
@@ -209,39 +226,42 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
209 sc, sc->err, sc->ip, sc->cs, sc->flags); 226 sc, sc->err, sc->ip, sc->cs, sc->flags);
210#endif 227#endif
211 228
212 /* 229 get_user_try {
213 * Reload fs and gs if they have changed in the signal 230 /*
214 * handler. This does not handle long fs/gs base changes in 231 * Reload fs and gs if they have changed in the signal
215 * the handler, but does not clobber them at least in the 232 * handler. This does not handle long fs/gs base changes in
216 * normal case. 233 * the handler, but does not clobber them at least in the
217 */ 234 * normal case.
218 err |= __get_user(gs, &sc->gs); 235 */
219 gs |= 3; 236 get_user_ex(gs, &sc->gs);
220 savesegment(gs, oldgs); 237 gs |= 3;
221 if (gs != oldgs) 238 savesegment(gs, oldgs);
222 load_gs_index(gs); 239 if (gs != oldgs)
223 240 load_gs_index(gs);
224 RELOAD_SEG(fs); 241
225 RELOAD_SEG(ds); 242 RELOAD_SEG(fs);
226 RELOAD_SEG(es); 243 RELOAD_SEG(ds);
227 244 RELOAD_SEG(es);
228 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); 245
229 COPY(dx); COPY(cx); COPY(ip); 246 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
230 /* Don't touch extended registers */ 247 COPY(dx); COPY(cx); COPY(ip);
231 248 /* Don't touch extended registers */
232 COPY_SEG_CPL3(cs); 249
233 COPY_SEG_CPL3(ss); 250 COPY_SEG_CPL3(cs);
234 251 COPY_SEG_CPL3(ss);
235 err |= __get_user(tmpflags, &sc->flags); 252
236 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); 253 get_user_ex(tmpflags, &sc->flags);
237 /* disable syscall checks */ 254 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
238 regs->orig_ax = -1; 255 /* disable syscall checks */
239 256 regs->orig_ax = -1;
240 err |= __get_user(tmp, &sc->fpstate); 257
241 buf = compat_ptr(tmp); 258 get_user_ex(tmp, &sc->fpstate);
242 err |= restore_i387_xstate_ia32(buf); 259 buf = compat_ptr(tmp);
243 260 err |= restore_i387_xstate_ia32(buf);
244 err |= __get_user(*pax, &sc->ax); 261
262 get_user_ex(*pax, &sc->ax);
263 } get_user_catch(err);
264
245 return err; 265 return err;
246} 266}
247 267
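
ia32_restore_sigcontext() above uses the matching read-side block; only restore_i387_xstate_ia32() still feeds err directly, since it is not a single-word access. The same pattern, condensed (kernel-only macros, illustrative struct):

/* Read-side counterpart, same exception-table mechanism: */
int read_two_fields(struct foo *to, struct foo __user *from)
{
        int err = 0;

        if (!access_ok(VERIFY_READ, from, sizeof(*from)))
                return -EFAULT;

        get_user_try {
                get_user_ex(to->a, &from->a);
                get_user_ex(to->b, &from->b);
        } get_user_catch(err);

        return err;
}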
@@ -319,36 +339,38 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
319{ 339{
320 int tmp, err = 0; 340 int tmp, err = 0;
321 341
322 savesegment(gs, tmp); 342 put_user_try {
323 err |= __put_user(tmp, (unsigned int __user *)&sc->gs); 343 savesegment(gs, tmp);
324 savesegment(fs, tmp); 344 put_user_ex(tmp, (unsigned int __user *)&sc->gs);
325 err |= __put_user(tmp, (unsigned int __user *)&sc->fs); 345 savesegment(fs, tmp);
326 savesegment(ds, tmp); 346 put_user_ex(tmp, (unsigned int __user *)&sc->fs);
327 err |= __put_user(tmp, (unsigned int __user *)&sc->ds); 347 savesegment(ds, tmp);
328 savesegment(es, tmp); 348 put_user_ex(tmp, (unsigned int __user *)&sc->ds);
329 err |= __put_user(tmp, (unsigned int __user *)&sc->es); 349 savesegment(es, tmp);
330 350 put_user_ex(tmp, (unsigned int __user *)&sc->es);
331 err |= __put_user(regs->di, &sc->di); 351
332 err |= __put_user(regs->si, &sc->si); 352 put_user_ex(regs->di, &sc->di);
333 err |= __put_user(regs->bp, &sc->bp); 353 put_user_ex(regs->si, &sc->si);
334 err |= __put_user(regs->sp, &sc->sp); 354 put_user_ex(regs->bp, &sc->bp);
335 err |= __put_user(regs->bx, &sc->bx); 355 put_user_ex(regs->sp, &sc->sp);
336 err |= __put_user(regs->dx, &sc->dx); 356 put_user_ex(regs->bx, &sc->bx);
337 err |= __put_user(regs->cx, &sc->cx); 357 put_user_ex(regs->dx, &sc->dx);
338 err |= __put_user(regs->ax, &sc->ax); 358 put_user_ex(regs->cx, &sc->cx);
339 err |= __put_user(current->thread.trap_no, &sc->trapno); 359 put_user_ex(regs->ax, &sc->ax);
340 err |= __put_user(current->thread.error_code, &sc->err); 360 put_user_ex(current->thread.trap_no, &sc->trapno);
341 err |= __put_user(regs->ip, &sc->ip); 361 put_user_ex(current->thread.error_code, &sc->err);
342 err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs); 362 put_user_ex(regs->ip, &sc->ip);
343 err |= __put_user(regs->flags, &sc->flags); 363 put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
344 err |= __put_user(regs->sp, &sc->sp_at_signal); 364 put_user_ex(regs->flags, &sc->flags);
345 err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); 365 put_user_ex(regs->sp, &sc->sp_at_signal);
346 366 put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
347 err |= __put_user(ptr_to_compat(fpstate), &sc->fpstate); 367
348 368 put_user_ex(ptr_to_compat(fpstate), &sc->fpstate);
349 /* non-iBCS2 extensions.. */ 369
350 err |= __put_user(mask, &sc->oldmask); 370 /* non-iBCS2 extensions.. */
351 err |= __put_user(current->thread.cr2, &sc->cr2); 371 put_user_ex(mask, &sc->oldmask);
372 put_user_ex(current->thread.cr2, &sc->cr2);
373 } put_user_catch(err);
352 374
353 return err; 375 return err;
354} 376}
@@ -437,13 +459,17 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
437 else 459 else
438 restorer = &frame->retcode; 460 restorer = &frame->retcode;
439 } 461 }
440 err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
441 462
442 /* 463 put_user_try {
443 * These are actually not used anymore, but left because some 464 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
444 * gdb versions depend on them as a marker. 465
445 */ 466 /*
446 err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode); 467 * These are actually not used anymore, but left because some
468 * gdb versions depend on them as a marker.
469 */
470 put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
471 } put_user_catch(err);
472
447 if (err) 473 if (err)
448 return -EFAULT; 474 return -EFAULT;
449 475
@@ -496,41 +522,40 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
496 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 522 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
497 return -EFAULT; 523 return -EFAULT;
498 524
499 err |= __put_user(sig, &frame->sig); 525 put_user_try {
500 err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo); 526 put_user_ex(sig, &frame->sig);
501 err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc); 527 put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
502 err |= copy_siginfo_to_user32(&frame->info, info); 528 put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
503 if (err) 529 err |= copy_siginfo_to_user32(&frame->info, info);
504 return -EFAULT;
505 530
506 /* Create the ucontext. */ 531 /* Create the ucontext. */
507 if (cpu_has_xsave) 532 if (cpu_has_xsave)
508 err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); 533 put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
509 else 534 else
510 err |= __put_user(0, &frame->uc.uc_flags); 535 put_user_ex(0, &frame->uc.uc_flags);
511 err |= __put_user(0, &frame->uc.uc_link); 536 put_user_ex(0, &frame->uc.uc_link);
512 err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); 537 put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
513 err |= __put_user(sas_ss_flags(regs->sp), 538 put_user_ex(sas_ss_flags(regs->sp),
514 &frame->uc.uc_stack.ss_flags); 539 &frame->uc.uc_stack.ss_flags);
515 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); 540 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
516 err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate, 541 err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
517 regs, set->sig[0]); 542 regs, set->sig[0]);
518 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 543 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
519 if (err) 544
520 return -EFAULT; 545 if (ka->sa.sa_flags & SA_RESTORER)
546 restorer = ka->sa.sa_restorer;
547 else
548 restorer = VDSO32_SYMBOL(current->mm->context.vdso,
549 rt_sigreturn);
550 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
551
552 /*
553 * Not actually used anymore, but left because some gdb
554 * versions need it.
555 */
556 put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
557 } put_user_catch(err);
521 558
522 if (ka->sa.sa_flags & SA_RESTORER)
523 restorer = ka->sa.sa_restorer;
524 else
525 restorer = VDSO32_SYMBOL(current->mm->context.vdso,
526 rt_sigreturn);
527 err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
528
529 /*
530 * Not actually used anymore, but left because some gdb
531 * versions need it.
532 */
533 err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
534 if (err) 559 if (err)
535 return -EFAULT; 560 return -EFAULT;
536 561
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 5a0d76dc56a4..097a6b64c24d 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -112,8 +112,8 @@ ENTRY(ia32_sysenter_target)
112 CFI_DEF_CFA rsp,0 112 CFI_DEF_CFA rsp,0
113 CFI_REGISTER rsp,rbp 113 CFI_REGISTER rsp,rbp
114 SWAPGS_UNSAFE_STACK 114 SWAPGS_UNSAFE_STACK
115 movq %gs:pda_kernelstack, %rsp 115 movq PER_CPU_VAR(kernel_stack), %rsp
116 addq $(PDA_STACKOFFSET),%rsp 116 addq $(KERNEL_STACK_OFFSET),%rsp
117 /* 117 /*
118 * No need to follow this irqs on/off section: the syscall 118 * No need to follow this irqs on/off section: the syscall
119 * disabled irqs, here we enable it straight after entry: 119 * disabled irqs, here we enable it straight after entry:
@@ -273,13 +273,13 @@ ENDPROC(ia32_sysenter_target)
273ENTRY(ia32_cstar_target) 273ENTRY(ia32_cstar_target)
274 CFI_STARTPROC32 simple 274 CFI_STARTPROC32 simple
275 CFI_SIGNAL_FRAME 275 CFI_SIGNAL_FRAME
276 CFI_DEF_CFA rsp,PDA_STACKOFFSET 276 CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
277 CFI_REGISTER rip,rcx 277 CFI_REGISTER rip,rcx
278 /*CFI_REGISTER rflags,r11*/ 278 /*CFI_REGISTER rflags,r11*/
279 SWAPGS_UNSAFE_STACK 279 SWAPGS_UNSAFE_STACK
280 movl %esp,%r8d 280 movl %esp,%r8d
281 CFI_REGISTER rsp,r8 281 CFI_REGISTER rsp,r8
282 movq %gs:pda_kernelstack,%rsp 282 movq PER_CPU_VAR(kernel_stack),%rsp
283 /* 283 /*
284 * No need to follow this irqs on/off section: the syscall 284 * No need to follow this irqs on/off section: the syscall
285 * disabled irqs and here we enable it straight after entry: 285 * disabled irqs and here we enable it straight after entry:
diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h
index 3c601f8224be..bb70e397aa84 100644
--- a/arch/x86/include/asm/a.out-core.h
+++ b/arch/x86/include/asm/a.out-core.h
@@ -55,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
55 dump->regs.ds = (u16)regs->ds; 55 dump->regs.ds = (u16)regs->ds;
56 dump->regs.es = (u16)regs->es; 56 dump->regs.es = (u16)regs->es;
57 dump->regs.fs = (u16)regs->fs; 57 dump->regs.fs = (u16)regs->fs;
58 savesegment(gs, dump->regs.gs); 58 dump->regs.gs = get_user_gs(regs);
59 dump->regs.orig_ax = regs->orig_ax; 59 dump->regs.orig_ax = regs->orig_ax;
60 dump->regs.ip = regs->ip; 60 dump->regs.ip = regs->ip;
61 dump->regs.cs = (u16)regs->cs; 61 dump->regs.cs = (u16)regs->cs;
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 9830681446ad..4518dc500903 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -102,9 +102,6 @@ static inline void disable_acpi(void)
102 acpi_noirq = 1; 102 acpi_noirq = 1;
103} 103}
104 104
105/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
106#define FIX_ACPI_PAGES 4
107
108extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); 105extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
109 106
110static inline void acpi_noirq_set(void) { acpi_noirq = 1; } 107static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index ab1d51a8855e..fba49f66228f 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -33,7 +33,13 @@
33 } while (0) 33 } while (0)
34 34
35 35
36#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
36extern void generic_apic_probe(void); 37extern void generic_apic_probe(void);
38#else
39static inline void generic_apic_probe(void)
40{
41}
42#endif
37 43
38#ifdef CONFIG_X86_LOCAL_APIC 44#ifdef CONFIG_X86_LOCAL_APIC
39 45
@@ -41,6 +47,21 @@ extern unsigned int apic_verbosity;
41extern int local_apic_timer_c2_ok; 47extern int local_apic_timer_c2_ok;
42 48
43extern int disable_apic; 49extern int disable_apic;
50
51#ifdef CONFIG_SMP
52extern void __inquire_remote_apic(int apicid);
53#else /* CONFIG_SMP */
54static inline void __inquire_remote_apic(int apicid)
55{
56}
57#endif /* CONFIG_SMP */
58
59static inline void default_inquire_remote_apic(int apicid)
60{
61 if (apic_verbosity >= APIC_DEBUG)
62 __inquire_remote_apic(apicid);
63}
64
44/* 65/*
45 * Basic functions accessing APICs. 66 * Basic functions accessing APICs.
46 */ 67 */
@@ -124,12 +145,35 @@ struct apic_ops {
124 145
125extern struct apic_ops *apic_ops; 146extern struct apic_ops *apic_ops;
126 147
127#define apic_read (apic_ops->read) 148static inline u32 apic_read(u32 reg)
128#define apic_write (apic_ops->write) 149{
129#define apic_icr_read (apic_ops->icr_read) 150 return apic_ops->read(reg);
130#define apic_icr_write (apic_ops->icr_write) 151}
131#define apic_wait_icr_idle (apic_ops->wait_icr_idle) 152
132#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle) 153static inline void apic_write(u32 reg, u32 val)
154{
155 apic_ops->write(reg, val);
156}
157
158static inline u64 apic_icr_read(void)
159{
160 return apic_ops->icr_read();
161}
162
163static inline void apic_icr_write(u32 low, u32 high)
164{
165 apic_ops->icr_write(low, high);
166}
167
168static inline void apic_wait_icr_idle(void)
169{
170 apic_ops->wait_icr_idle();
171}
172
173static inline u32 safe_apic_wait_icr_idle(void)
174{
175 return apic_ops->safe_wait_icr_idle();
176}
133 177
134extern int get_physical_broadcast(void); 178extern int get_physical_broadcast(void);
135 179
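
Replacing the "#define apic_read (apic_ops->read)" style macros with static inline wrappers keeps the indirection through the ops table but restores argument and return-type checking at every call site. A runnable user-space sketch of the difference, with hypothetical names:

/* ops_wrapper.c - why typed inline wrappers beat bare #defines */
#include <stdio.h>

struct ops {
        unsigned int (*read)(unsigned int reg);
};

static unsigned int dummy_read(unsigned int reg)
{
        return reg ^ 0xffu;
}

static struct ops *ops = &(struct ops){ .read = dummy_read };

/* #define my_read (ops->read)  -- works, but the use sites get no
 * prototype checking and the "function" cannot have its address
 * taken or appear in a debugger backtrace under its own name. */

static inline unsigned int my_read(unsigned int reg)  /* typed wrapper */
{
        return ops->read(reg);
}

int main(void)
{
        printf("%u\n", my_read(0));
        return 0;
}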
@@ -196,4 +240,22 @@ static inline void disable_local_APIC(void) { }
196 240
197#endif /* !CONFIG_X86_LOCAL_APIC */ 241#endif /* !CONFIG_X86_LOCAL_APIC */
198 242
243#ifdef CONFIG_X86_64
244#define SET_APIC_ID(x) (apic->set_apic_id(x))
245#else
246
247#ifdef CONFIG_X86_LOCAL_APIC
248static inline unsigned default_get_apic_id(unsigned long x)
249{
250 unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
251
252 if (APIC_XAPIC(ver))
253 return (x >> 24) & 0xFF;
254 else
255 return (x >> 24) & 0x0F;
256}
257#endif
258
259#endif
260
199#endif /* _ASM_X86_APIC_H */ 261#endif /* _ASM_X86_APIC_H */
diff --git a/arch/x86/include/asm/apicnum.h b/arch/x86/include/asm/apicnum.h
new file mode 100644
index 000000000000..82f613c607ce
--- /dev/null
+++ b/arch/x86/include/asm/apicnum.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_X86_APICNUM_H
2#define _ASM_X86_APICNUM_H
3
4/* define MAX_IO_APICS */
5#ifdef CONFIG_X86_32
6# define MAX_IO_APICS 64
7#else
8# define MAX_IO_APICS 128
9# define MAX_LOCAL_APIC 32768
10#endif
11
12#endif /* _ASM_X86_APICNUM_H */
diff --git a/arch/x86/include/asm/mach-default/apm.h b/arch/x86/include/asm/apm.h
index 20370c6db74b..20370c6db74b 100644
--- a/arch/x86/include/asm/mach-default/apm.h
+++ b/arch/x86/include/asm/apm.h
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
deleted file mode 100644
index d8dd9f537911..000000000000
--- a/arch/x86/include/asm/bigsmp/apic.h
+++ /dev/null
@@ -1,155 +0,0 @@
1#ifndef __ASM_MACH_APIC_H
2#define __ASM_MACH_APIC_H
3
4#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
5#define esr_disable (1)
6
7static inline int apic_id_registered(void)
8{
9 return (1);
10}
11
12static inline const cpumask_t *target_cpus(void)
13{
14#ifdef CONFIG_SMP
15 return &cpu_online_map;
16#else
17 return &cpumask_of_cpu(0);
18#endif
19}
20
21#undef APIC_DEST_LOGICAL
22#define APIC_DEST_LOGICAL 0
23#define APIC_DFR_VALUE (APIC_DFR_FLAT)
24#define INT_DELIVERY_MODE (dest_Fixed)
25#define INT_DEST_MODE (0) /* phys delivery to target proc */
26#define NO_BALANCE_IRQ (0)
27
28static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
29{
30 return (0);
31}
32
33static inline unsigned long check_apicid_present(int bit)
34{
35 return (1);
36}
37
38static inline unsigned long calculate_ldr(int cpu)
39{
40 unsigned long val, id;
41 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
42 id = xapic_phys_to_log_apicid(cpu);
43 val |= SET_APIC_LOGICAL_ID(id);
44 return val;
45}
46
47/*
48 * Set up the logical destination ID.
49 *
50 * Intel recommends to set DFR, LDR and TPR before enabling
51 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
52 * document number 292116). So here it goes...
53 */
54static inline void init_apic_ldr(void)
55{
56 unsigned long val;
57 int cpu = smp_processor_id();
58
59 apic_write(APIC_DFR, APIC_DFR_VALUE);
60 val = calculate_ldr(cpu);
61 apic_write(APIC_LDR, val);
62}
63
64static inline void setup_apic_routing(void)
65{
66 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
67 "Physflat", nr_ioapics);
68}
69
70static inline int multi_timer_check(int apic, int irq)
71{
72 return (0);
73}
74
75static inline int apicid_to_node(int logical_apicid)
76{
77 return apicid_2_node[hard_smp_processor_id()];
78}
79
80static inline int cpu_present_to_apicid(int mps_cpu)
81{
82 if (mps_cpu < nr_cpu_ids)
83 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
84
85 return BAD_APICID;
86}
87
88static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
89{
90 return physid_mask_of_physid(phys_apicid);
91}
92
93extern u8 cpu_2_logical_apicid[];
94/* Mapping from cpu number to logical apicid */
95static inline int cpu_to_logical_apicid(int cpu)
96{
97 if (cpu >= nr_cpu_ids)
98 return BAD_APICID;
99 return cpu_physical_id(cpu);
100}
101
102static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
103{
104 /* For clustered we don't have a good way to do this yet - hack */
105 return physids_promote(0xFFL);
106}
107
108static inline void setup_portio_remap(void)
109{
110}
111
112static inline void enable_apic_mode(void)
113{
114}
115
116static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
117{
118 return (1);
119}
120
121/* As we are using single CPU as destination, pick only one CPU here */
122static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
123{
124 int cpu;
125 int apicid;
126
127 cpu = first_cpu(*cpumask);
128 apicid = cpu_to_logical_apicid(cpu);
129 return apicid;
130}
131
132static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
133 const struct cpumask *andmask)
134{
135 int cpu;
136
137 /*
138 * We're using fixed IRQ delivery, can only return one phys APIC ID.
139 * May as well be the first.
140 */
141 for_each_cpu_and(cpu, cpumask, andmask)
142 if (cpumask_test_cpu(cpu, cpu_online_mask))
143 break;
144 if (cpu < nr_cpu_ids)
145 return cpu_to_logical_apicid(cpu);
146
147 return BAD_APICID;
148}
149
150static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
151{
152 return cpuid_apic >> index_msb;
153}
154
155#endif /* __ASM_MACH_APIC_H */
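
The deleted bigsmp cpu_mask_to_apicid_and() above picks the first CPU that is set in both masks and online, since fixed IRQ delivery can target only one physical APIC ID. The selection logic, reduced to a runnable user-space analogue over plain bitmasks (names hypothetical):

/* first_common_cpu.c - first cpu set in both masks and online,
 * mirroring the "may as well be the first" policy above. */
#include <stdio.h>

#define NR_CPUS 32
#define BAD_ID  0xff

static int first_common_cpu(unsigned int a, unsigned int b,
                            unsigned int online)
{
        unsigned int all = a & b & online;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (all & (1u << cpu))
                        return cpu;     /* first match wins */
        return BAD_ID;
}

int main(void)
{
        /* cpus {1,3} requested, {3,4} allowed, {0,3,4} online -> 3 */
        printf("%d\n", first_common_cpu(0x0a, 0x18, 0x19));
        return 0;
}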
diff --git a/arch/x86/include/asm/bigsmp/apicdef.h b/arch/x86/include/asm/bigsmp/apicdef.h
deleted file mode 100644
index 392c3f5ef2fe..000000000000
--- a/arch/x86/include/asm/bigsmp/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xFF);
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h
deleted file mode 100644
index 27fcd01b3ae6..000000000000
--- a/arch/x86/include/asm/bigsmp/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef __ASM_MACH_IPI_H
2#define __ASM_MACH_IPI_H
3
4void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
5void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
6
7static inline void send_IPI_mask(const struct cpumask *mask, int vector)
8{
9 send_IPI_mask_sequence(mask, vector);
10}
11
12static inline void send_IPI_allbutself(int vector)
13{
14 send_IPI_mask_allbutself(cpu_online_mask, vector);
15}
16
17static inline void send_IPI_all(int vector)
18{
19 send_IPI_mask(cpu_online_mask, vector);
20}
21
22#endif /* __ASM_MACH_IPI_H */
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 2bc162e0ec6e..0e63c9a2a8d0 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -1,5 +1,55 @@
1/* 1/*
2 * Some macros to handle stack frames in assembly. 2
3 x86 function call convention, 64-bit:
4 -------------------------------------
5 arguments | callee-saved | extra caller-saved | return
6 [callee-clobbered] | | [callee-clobbered] |
7 ---------------------------------------------------------------------------
8 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11 | rax, rdx [**]
9
10 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
11 functions when it sees tail-call optimization possibilities) rflags is
12 clobbered. Leftover arguments are passed over the stack frame.)
13
14 [*] In the frame-pointers case rbp is fixed to the stack frame.
15
16 [**] for struct return values wider than 64 bits the return convention is a
17 bit more complex: up to 128 bits width we return small structures
18 straight in rax, rdx. For structures larger than that (3 words or
19 larger) the caller puts a pointer to an on-stack return struct
20 [allocated in the caller's stack frame] into the first argument - i.e.
21 into rdi. All other arguments shift up by one in this case.
22 Fortunately this case is rare in the kernel.
23
24For 32-bit we have the following conventions - kernel is built with
25-mregparm=3 and -freg-struct-return:
26
27 x86 function calling convention, 32-bit:
28 ----------------------------------------
29 arguments | callee-saved | extra caller-saved | return
30 [callee-clobbered] | | [callee-clobbered] |
31 -------------------------------------------------------------------------
32 eax edx ecx | ebx edi esi ebp [*] | <none> | eax, edx [**]
33
34 ( here too esp is obviously invariant across normal function calls. eflags
35 is clobbered. Leftover arguments are passed over the stack frame. )
36
37 [*] In the frame-pointers case ebp is fixed to the stack frame.
38
39 [**] We build with -freg-struct-return, which on 32-bit means similar
40 semantics as on 64-bit: edx can be used for a second return value
41 (i.e. covering integer and structure sizes up to 64 bits) - after that
42 it gets more complex and more expensive: 3-word or larger struct returns
43 get done in the caller's frame and the pointer to the return struct goes
44 into regparm0, i.e. eax - the other arguments shift up and the
45 function's register parameters degenerate to regparm=2 in essence.
46
47*/
48
49
50/*
51 * 64-bit system call stack frame layout defines and helpers,
52 * for assembly code:
3 */ 53 */
4 54
5#define R15 0 55#define R15 0
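
The new comment block documents, among other things, the [**] rule: struct returns up to 128 bits come back in rax:rdx, anything wider goes through a hidden result pointer in rdi that shifts the real arguments up by one. A runnable user-space check of exactly that boundary:

/* sret_demo.c - x86-64 SysV struct-return boundary.
 * two_words fits in rax:rdx; three_words makes the caller pass a
 * hidden result pointer in rdi. Compare: gcc -O2 -S sret_demo.c */
#include <stdio.h>

struct two_words   { long a, b; };      /* 128 bits: rax, rdx */
struct three_words { long a, b, c; };   /* 192 bits: via rdi  */

static struct two_words make2(long x)
{
        return (struct two_words){ x, x + 1 };
}

static struct three_words make3(long x)
{
        return (struct three_words){ x, x + 1, x + 2 };
}

int main(void)
{
        struct two_words t = make2(1);
        struct three_words u = make3(10);

        printf("%ld %ld %ld\n", t.b, u.b, u.c);
        return 0;
}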
@@ -9,7 +59,7 @@
9#define RBP 32 59#define RBP 32
10#define RBX 40 60#define RBX 40
11 61
12/* arguments: interrupts/non tracing syscalls only save upto here*/ 62/* arguments: interrupts/non tracing syscalls only save up to here: */
13#define R11 48 63#define R11 48
14#define R10 56 64#define R10 56
15#define R9 64 65#define R9 64
@@ -22,7 +72,7 @@
22#define ORIG_RAX 120 /* + error_code */ 72#define ORIG_RAX 120 /* + error_code */
23/* end of arguments */ 73/* end of arguments */
24 74
25/* cpu exception frame or undefined in case of fast syscall. */ 75/* cpu exception frame or undefined in case of fast syscall: */
26#define RIP 128 76#define RIP 128
27#define CS 136 77#define CS 136
28#define EFLAGS 144 78#define EFLAGS 144
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index bae482df6039..b185091bf19c 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -7,6 +7,20 @@
7#include <linux/nodemask.h> 7#include <linux/nodemask.h>
8#include <linux/percpu.h> 8#include <linux/percpu.h>
9 9
10#ifdef CONFIG_SMP
11
12extern void prefill_possible_map(void);
13
14#else /* CONFIG_SMP */
15
16static inline void prefill_possible_map(void) {}
17
18#define cpu_physical_id(cpu) boot_cpu_physical_apicid
19#define safe_smp_processor_id() 0
20#define stack_smp_processor_id() 0
21
22#endif /* CONFIG_SMP */
23
10struct x86_cpu { 24struct x86_cpu {
11 struct cpu cpu; 25 struct cpu cpu;
12}; 26};
@@ -17,4 +31,7 @@ extern void arch_unregister_cpu(int);
17#endif 31#endif
18 32
19DECLARE_PER_CPU(int, cpu_state); 33DECLARE_PER_CPU(int, cpu_state);
34
35extern unsigned int boot_cpu_id;
36
20#endif /* _ASM_X86_CPU_H */ 37#endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
new file mode 100644
index 000000000000..a7f3c75f8ad7
--- /dev/null
+++ b/arch/x86/include/asm/cpumask.h
@@ -0,0 +1,32 @@
1#ifndef _ASM_X86_CPUMASK_H
2#define _ASM_X86_CPUMASK_H
3#ifndef __ASSEMBLY__
4#include <linux/cpumask.h>
5
6#ifdef CONFIG_X86_64
7
8extern cpumask_var_t cpu_callin_mask;
9extern cpumask_var_t cpu_callout_mask;
10extern cpumask_var_t cpu_initialized_mask;
11extern cpumask_var_t cpu_sibling_setup_mask;
12
13extern void setup_cpu_local_masks(void);
14
15#else /* CONFIG_X86_32 */
16
17extern cpumask_t cpu_callin_map;
18extern cpumask_t cpu_callout_map;
19extern cpumask_t cpu_initialized;
20extern cpumask_t cpu_sibling_setup_map;
21
22#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
23#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
24#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
25#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
26
27static inline void setup_cpu_local_masks(void) { }
28
29#endif /* CONFIG_X86_32 */
30
31#endif /* __ASSEMBLY__ */
32#endif /* _ASM_X86_CPUMASK_H */
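
On 64-bit the new header switches these boot-time masks to cpumask_var_t, which is heap-allocated when CONFIG_CPUMASK_OFFSTACK=y rather than embedded at full NR_CPUS width. The usual calling pattern around such masks, as a short kernel-style sketch (function name hypothetical):

/* Typical cpumask_var_t usage behind the 64-bit branch above: */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int use_temp_mask(void)
{
        cpumask_var_t tmp;

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))   /* may allocate */
                return -ENOMEM;

        cpumask_and(tmp, cpu_online_mask, cpu_present_mask);
        /* ... use tmp ... */

        free_cpumask_var(tmp);          /* no-op for on-stack masks */
        return 0;
}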
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 0930b4f8d672..c68c361697e1 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -1,39 +1,21 @@
1#ifndef _ASM_X86_CURRENT_H 1#ifndef _ASM_X86_CURRENT_H
2#define _ASM_X86_CURRENT_H 2#define _ASM_X86_CURRENT_H
3 3
4#ifdef CONFIG_X86_32
5#include <linux/compiler.h> 4#include <linux/compiler.h>
6#include <asm/percpu.h> 5#include <asm/percpu.h>
7 6
7#ifndef __ASSEMBLY__
8struct task_struct; 8struct task_struct;
9 9
10DECLARE_PER_CPU(struct task_struct *, current_task); 10DECLARE_PER_CPU(struct task_struct *, current_task);
11static __always_inline struct task_struct *get_current(void)
12{
13 return x86_read_percpu(current_task);
14}
15
16#else /* X86_32 */
17
18#ifndef __ASSEMBLY__
19#include <asm/pda.h>
20
21struct task_struct;
22 11
23static __always_inline struct task_struct *get_current(void) 12static __always_inline struct task_struct *get_current(void)
24{ 13{
25 return read_pda(pcurrent); 14 return percpu_read(current_task);
26} 15}
27 16
28#else /* __ASSEMBLY__ */ 17#define current get_current()
29
30#include <asm/asm-offsets.h>
31#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
32 18
33#endif /* __ASSEMBLY__ */ 19#endif /* __ASSEMBLY__ */
34 20
35#endif /* X86_32 */
36
37#define current get_current()
38
39#endif /* _ASM_X86_CURRENT_H */ 21#endif /* _ASM_X86_CURRENT_H */
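
With the zero-based percpu conversion, both 32- and 64-bit now read current from an ordinary per-cpu variable, so the PDA variant above disappears entirely. A condensed kernel-style sketch of the accessor pattern this enables, using the percpu_read()/percpu_write() helpers from this series on a hypothetical counter:

/* percpu_read/percpu_write shrink to one segment-relative
 * instruction each, the same mechanism get_current() now uses: */
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_count);

static void demo_bump(void)
{
        /* compiles to a single %gs:-relative read-modify-write,
         * no PDA indirection and no smp_processor_id() lookup */
        percpu_write(demo_count, percpu_read(demo_count) + 1);
}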
diff --git a/arch/x86/include/asm/mach-default/do_timer.h b/arch/x86/include/asm/do_timer.h
index 23ecda0b28a0..23ecda0b28a0 100644
--- a/arch/x86/include/asm/mach-default/do_timer.h
+++ b/arch/x86/include/asm/do_timer.h
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index f51a3ddde01a..83c1bc8d2e8a 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -112,7 +112,7 @@ extern unsigned int vdso_enabled;
112 * now struct_user_regs, they are different) 112 * now struct_user_regs, they are different)
113 */ 113 */
114 114
115#define ELF_CORE_COPY_REGS(pr_reg, regs) \ 115#define ELF_CORE_COPY_REGS_COMMON(pr_reg, regs) \
116do { \ 116do { \
117 pr_reg[0] = regs->bx; \ 117 pr_reg[0] = regs->bx; \
118 pr_reg[1] = regs->cx; \ 118 pr_reg[1] = regs->cx; \
@@ -124,7 +124,6 @@ do { \
124 pr_reg[7] = regs->ds & 0xffff; \ 124 pr_reg[7] = regs->ds & 0xffff; \
125 pr_reg[8] = regs->es & 0xffff; \ 125 pr_reg[8] = regs->es & 0xffff; \
126 pr_reg[9] = regs->fs & 0xffff; \ 126 pr_reg[9] = regs->fs & 0xffff; \
127 savesegment(gs, pr_reg[10]); \
128 pr_reg[11] = regs->orig_ax; \ 127 pr_reg[11] = regs->orig_ax; \
129 pr_reg[12] = regs->ip; \ 128 pr_reg[12] = regs->ip; \
130 pr_reg[13] = regs->cs & 0xffff; \ 129 pr_reg[13] = regs->cs & 0xffff; \
@@ -133,6 +132,18 @@ do { \
133 pr_reg[16] = regs->ss & 0xffff; \ 132 pr_reg[16] = regs->ss & 0xffff; \
134} while (0); 133} while (0);
135 134
135#define ELF_CORE_COPY_REGS(pr_reg, regs) \
136do { \
137 ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
138 pr_reg[10] = get_user_gs(regs); \
139} while (0);
140
141#define ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs) \
142do { \
143 ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
144 savesegment(gs, pr_reg[10]); \
145} while (0);
146
136#define ELF_PLATFORM (utsname()->machine) 147#define ELF_PLATFORM (utsname()->machine)
137#define set_personality_64bit() do { } while (0) 148#define set_personality_64bit() do { } while (0)
138 149
diff --git a/arch/x86/include/asm/mach-default/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 6b1add8e31dd..854d538ae857 100644
--- a/arch/x86/include/asm/mach-default/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -9,12 +9,28 @@
9 * is no hardware IRQ pin equivalent for them, they are triggered 9 * is no hardware IRQ pin equivalent for them, they are triggered
10 * through the ICC by us (IPIs) 10 * through the ICC by us (IPIs)
11 */ 11 */
12#ifdef CONFIG_X86_SMP 12#ifdef CONFIG_SMP
13BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) 13BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
14BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
15BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) 14BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
16BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) 15BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
17BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) 16BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
17
18BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0,
19 smp_invalidate_interrupt)
20BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,
21 smp_invalidate_interrupt)
22BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
23 smp_invalidate_interrupt)
24BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
25 smp_invalidate_interrupt)
26BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,
27 smp_invalidate_interrupt)
28BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5,
29 smp_invalidate_interrupt)
30BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,
31 smp_invalidate_interrupt)
32BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
33 smp_invalidate_interrupt)
18#endif 34#endif
19 35
20/* 36/*
@@ -25,10 +41,15 @@ BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
25 * a much simpler SMP time architecture: 41 * a much simpler SMP time architecture:
26 */ 42 */
27#ifdef CONFIG_X86_LOCAL_APIC 43#ifdef CONFIG_X86_LOCAL_APIC
44
28BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) 45BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
29BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) 46BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
30BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) 47BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
31 48
49#ifdef CONFIG_PERF_COUNTERS
50BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR)
51#endif
52
32#ifdef CONFIG_X86_MCE_P4THERMAL 53#ifdef CONFIG_X86_MCE_P4THERMAL
33BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) 54BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
34#endif 55#endif
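
Expanding invalidate_interrupt into eight BUILD_INTERRUPT3 vectors lets up to eight TLB-flush IPIs proceed in parallel, one flush-state slot per vector, matching what 64-bit already does. A sketch of the sender-side selection, assuming the NUM_INVALIDATE_TLB_VECTORS convention of the 64-bit flush code (the vector base value here is illustrative only):

/* Sender picks its slot from its CPU number, so flushes from
 * different senders land on different vectors and don't serialize: */
#define INVALIDATE_TLB_VECTOR_START  0xec   /* illustrative value */
#define NUM_INVALIDATE_TLB_VECTORS   8

static int pick_flush_vector(int sender_cpu)
{
        return INVALIDATE_TLB_VECTOR_START +
               (sender_cpu % NUM_INVALIDATE_TLB_VECTORS);
}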
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
deleted file mode 100644
index c58b9cc74465..000000000000
--- a/arch/x86/include/asm/es7000/apic.h
+++ /dev/null
@@ -1,242 +0,0 @@
1#ifndef __ASM_ES7000_APIC_H
2#define __ASM_ES7000_APIC_H
3
4#include <linux/gfp.h>
5
6#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
7#define esr_disable (1)
8
9static inline int apic_id_registered(void)
10{
11 return (1);
12}
13
14static inline const cpumask_t *target_cpus_cluster(void)
15{
16 return &CPU_MASK_ALL;
17}
18
19static inline const cpumask_t *target_cpus(void)
20{
21 return &cpumask_of_cpu(smp_processor_id());
22}
23
24#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
25#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio)
26#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */
27#define NO_BALANCE_IRQ_CLUSTER (1)
28
29#define APIC_DFR_VALUE (APIC_DFR_FLAT)
30#define INT_DELIVERY_MODE (dest_Fixed)
31#define INT_DEST_MODE (0) /* phys delivery to target procs */
32#define NO_BALANCE_IRQ (0)
33#undef APIC_DEST_LOGICAL
34#define APIC_DEST_LOGICAL 0x0
35
36static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
37{
38 return 0;
39}
40static inline unsigned long check_apicid_present(int bit)
41{
42 return physid_isset(bit, phys_cpu_present_map);
43}
44
45#define apicid_cluster(apicid) (apicid & 0xF0)
46
47static inline unsigned long calculate_ldr(int cpu)
48{
49 unsigned long id;
50 id = xapic_phys_to_log_apicid(cpu);
51 return (SET_APIC_LOGICAL_ID(id));
52}
53
54/*
55 * Set up the logical destination ID.
56 *
57 * Intel recommends to set DFR, LdR and TPR before enabling
58 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
59 * document number 292116). So here it goes...
60 */
61static inline void init_apic_ldr_cluster(void)
62{
63 unsigned long val;
64 int cpu = smp_processor_id();
65
66 apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
67 val = calculate_ldr(cpu);
68 apic_write(APIC_LDR, val);
69}
70
71static inline void init_apic_ldr(void)
72{
73 unsigned long val;
74 int cpu = smp_processor_id();
75
76 apic_write(APIC_DFR, APIC_DFR_VALUE);
77 val = calculate_ldr(cpu);
78 apic_write(APIC_LDR, val);
79}
80
81extern int apic_version [MAX_APICS];
82static inline void setup_apic_routing(void)
83{
84 int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
85 printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
86 (apic_version[apic] == 0x14) ?
87 "Physical Cluster" : "Logical Cluster",
88 nr_ioapics, cpus_addr(*target_cpus())[0]);
89}
90
91static inline int multi_timer_check(int apic, int irq)
92{
93 return 0;
94}
95
96static inline int apicid_to_node(int logical_apicid)
97{
98 return 0;
99}
100
101
102static inline int cpu_present_to_apicid(int mps_cpu)
103{
104 if (!mps_cpu)
105 return boot_cpu_physical_apicid;
106 else if (mps_cpu < nr_cpu_ids)
107 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
108 else
109 return BAD_APICID;
110}
111
112static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
113{
114 static int id = 0;
115 physid_mask_t mask;
116 mask = physid_mask_of_physid(id);
117 ++id;
118 return mask;
119}
120
121extern u8 cpu_2_logical_apicid[];
122/* Mapping from cpu number to logical apicid */
123static inline int cpu_to_logical_apicid(int cpu)
124{
125#ifdef CONFIG_SMP
126 if (cpu >= nr_cpu_ids)
127 return BAD_APICID;
128 return (int)cpu_2_logical_apicid[cpu];
129#else
130 return logical_smp_processor_id();
131#endif
132}
133
134static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
135{
136 /* For clustered we don't have a good way to do this yet - hack */
137 return physids_promote(0xff);
138}
139
140
141static inline void setup_portio_remap(void)
142{
143}
144
145extern unsigned int boot_cpu_physical_apicid;
146static inline int check_phys_apicid_present(int cpu_physical_apicid)
147{
148 boot_cpu_physical_apicid = read_apic_id();
149 return (1);
150}
151
152static inline unsigned int
153cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
154{
155 int num_bits_set;
156 int cpus_found = 0;
157 int cpu;
158 int apicid;
159
160 num_bits_set = cpumask_weight(cpumask);
161 /* Return id to all */
162 if (num_bits_set == nr_cpu_ids)
163 return 0xFF;
164 /*
165 * The cpus in the mask must all be on the apic cluster. If are not
166 * on the same apicid cluster return default value of TARGET_CPUS.
167 */
168 cpu = cpumask_first(cpumask);
169 apicid = cpu_to_logical_apicid(cpu);
170 while (cpus_found < num_bits_set) {
171 if (cpumask_test_cpu(cpu, cpumask)) {
172 int new_apicid = cpu_to_logical_apicid(cpu);
173 if (apicid_cluster(apicid) !=
174 apicid_cluster(new_apicid)){
175 printk ("%s: Not a valid mask!\n", __func__);
176 return 0xFF;
177 }
178 apicid = new_apicid;
179 cpus_found++;
180 }
181 cpu++;
182 }
183 return apicid;
184}
185
186static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
187{
188 int num_bits_set;
189 int cpus_found = 0;
190 int cpu;
191 int apicid;
192
193 num_bits_set = cpus_weight(*cpumask);
194 /* Return id to all */
195 if (num_bits_set == nr_cpu_ids)
196 return cpu_to_logical_apicid(0);
197 /*
198 * The cpus in the mask must all be on the apic cluster. If are not
199 * on the same apicid cluster return default value of TARGET_CPUS.
200 */
201 cpu = first_cpu(*cpumask);
202 apicid = cpu_to_logical_apicid(cpu);
203 while (cpus_found < num_bits_set) {
204 if (cpu_isset(cpu, *cpumask)) {
205 int new_apicid = cpu_to_logical_apicid(cpu);
206 if (apicid_cluster(apicid) !=
207 apicid_cluster(new_apicid)){
208 printk ("%s: Not a valid mask!\n", __func__);
209 return cpu_to_logical_apicid(0);
210 }
211 apicid = new_apicid;
212 cpus_found++;
213 }
214 cpu++;
215 }
216 return apicid;
217}
218
219
220static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
221 const struct cpumask *andmask)
222{
223 int apicid = cpu_to_logical_apicid(0);
224 cpumask_var_t cpumask;
225
226 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
227 return apicid;
228
229 cpumask_and(cpumask, inmask, andmask);
230 cpumask_and(cpumask, cpumask, cpu_online_mask);
231 apicid = cpu_mask_to_apicid(cpumask);
232
233 free_cpumask_var(cpumask);
234 return apicid;
235}
236
237static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
238{
239 return cpuid_apic >> index_msb;
240}
241
242#endif /* __ASM_ES7000_APIC_H */
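
The deleted cpu_mask_to_apicid() variants above walk the whole mask and bail out unless every CPU's logical APIC ID falls in one cluster, where apicid_cluster() keeps only the top nibble (apicid & 0xF0). That check, reduced to a runnable user-space analogue:

/* cluster_check.c - all ids must share bits 7:4, like
 * apicid_cluster(apicid) in the deleted helper above. */
#include <stdio.h>

static int same_cluster(const unsigned char *ids, int n)
{
        int i;

        for (i = 1; i < n; i++)
                if ((ids[i] & 0xF0) != (ids[0] & 0xF0))
                        return 0;       /* the "Not a valid mask!" case */
        return 1;
}

int main(void)
{
        unsigned char ok[]  = { 0x21, 0x22, 0x27 };     /* one cluster */
        unsigned char bad[] = { 0x21, 0x32 };           /* two clusters */

        printf("%d %d\n", same_cluster(ok, 3), same_cluster(bad, 2));
        return 0;
}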
diff --git a/arch/x86/include/asm/es7000/apicdef.h b/arch/x86/include/asm/es7000/apicdef.h
deleted file mode 100644
index 8b234a3cb851..000000000000
--- a/arch/x86/include/asm/es7000/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_ES7000_APICDEF_H
2#define __ASM_ES7000_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xFF);
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h
deleted file mode 100644
index 7e8ed24d4b8a..000000000000
--- a/arch/x86/include/asm/es7000/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef __ASM_ES7000_IPI_H
2#define __ASM_ES7000_IPI_H
3
4void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
5void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
6
7static inline void send_IPI_mask(const struct cpumask *mask, int vector)
8{
9 send_IPI_mask_sequence(mask, vector);
10}
11
12static inline void send_IPI_allbutself(int vector)
13{
14 send_IPI_mask_allbutself(cpu_online_mask, vector);
15}
16
17static inline void send_IPI_all(int vector)
18{
19 send_IPI_mask(cpu_online_mask, vector);
20}
21
22#endif /* __ASM_ES7000_IPI_H */
diff --git a/arch/x86/include/asm/es7000/mpparse.h b/arch/x86/include/asm/es7000/mpparse.h
deleted file mode 100644
index c1629b090ec2..000000000000
--- a/arch/x86/include/asm/es7000/mpparse.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __ASM_ES7000_MPPARSE_H
2#define __ASM_ES7000_MPPARSE_H
3
4#include <linux/acpi.h>
5
6extern int parse_unisys_oem (char *oemptr);
7extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
8extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
9extern void setup_unisys(void);
10
11#ifndef CONFIG_X86_GENERICARCH
12extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
13extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid);
14#endif
15
16#ifdef CONFIG_ACPI
17
18static inline int es7000_check_dsdt(void)
19{
20 struct acpi_table_header header;
21
22 if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
23 !strncmp(header.oem_id, "UNISYS", 6))
24 return 1;
25 return 0;
26}
27#endif
28
29#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h
deleted file mode 100644
index 78f0daaee436..000000000000
--- a/arch/x86/include/asm/es7000/wakecpu.h
+++ /dev/null
@@ -1,37 +0,0 @@
1#ifndef __ASM_ES7000_WAKECPU_H
2#define __ASM_ES7000_WAKECPU_H
3
4#define TRAMPOLINE_PHYS_LOW 0x467
5#define TRAMPOLINE_PHYS_HIGH 0x469
6
7static inline void wait_for_init_deassert(atomic_t *deassert)
8{
9#ifndef CONFIG_ES7000_CLUSTERED_APIC
10 while (!atomic_read(deassert))
11 cpu_relax();
12#endif
13 return;
14}
15
16/* Nothing to do for most platforms, since cleared by the INIT cycle */
17static inline void smp_callin_clear_local_apic(void)
18{
19}
20
21static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
22{
23}
24
25static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
26{
27}
28
29extern void __inquire_remote_apic(int apicid);
30
31static inline void inquire_remote_apic(int apicid)
32{
33 if (apic_verbosity >= APIC_DEBUG)
34 __inquire_remote_apic(apicid);
35}
36
37#endif /* __ASM_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h
index c7115c1d7217..047d9bab2b31 100644
--- a/arch/x86/include/asm/fixmap_32.h
+++ b/arch/x86/include/asm/fixmap_32.h
@@ -95,10 +95,6 @@ enum fixed_addresses {
95 (__end_of_permanent_fixed_addresses & 255), 95 (__end_of_permanent_fixed_addresses & 255),
96 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1, 96 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
97 FIX_WP_TEST, 97 FIX_WP_TEST,
98#ifdef CONFIG_ACPI
99 FIX_ACPI_BEGIN,
100 FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
101#endif
102#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT 98#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
103 FIX_OHCI1394_BASE, 99 FIX_OHCI1394_BASE,
104#endif 100#endif
diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h
index 00a30ab9b1a5..298d9ba3faeb 100644
--- a/arch/x86/include/asm/fixmap_64.h
+++ b/arch/x86/include/asm/fixmap_64.h
@@ -50,10 +50,6 @@ enum fixed_addresses {
50 FIX_PARAVIRT_BOOTMAP, 50 FIX_PARAVIRT_BOOTMAP,
51#endif 51#endif
52 __end_of_permanent_fixed_addresses, 52 __end_of_permanent_fixed_addresses,
53#ifdef CONFIG_ACPI
54 FIX_ACPI_BEGIN,
55 FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
56#endif
57#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT 53#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
58 FIX_OHCI1394_BASE, 54 FIX_OHCI1394_BASE,
59#endif 55#endif
diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h
index d48bee663a6f..273b99452ae0 100644
--- a/arch/x86/include/asm/genapic.h
+++ b/arch/x86/include/asm/genapic.h
@@ -1,5 +1,263 @@
1#ifndef _ASM_X86_GENAPIC_H
2#define _ASM_X86_GENAPIC_H
3
4#include <linux/cpumask.h>
5
6#include <asm/mpspec.h>
7#include <asm/atomic.h>
8
9/*
10 * Copyright 2004 James Cleverdon, IBM.
11 * Subject to the GNU Public License, v.2
12 *
13 * Generic APIC sub-arch data struct.
14 *
15 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
16 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
17 * James Cleverdon.
18 */
19struct genapic {
20 char *name;
21
22 int (*probe)(void);
23 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
24 int (*apic_id_registered)(void);
25
26 u32 irq_delivery_mode;
27 u32 irq_dest_mode;
28
29 const struct cpumask *(*target_cpus)(void);
30
31 int disable_esr;
32
33 int dest_logical;
34 unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
35 unsigned long (*check_apicid_present)(int apicid);
36
37 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
38 void (*init_apic_ldr)(void);
39
40 physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
41
42 void (*setup_apic_routing)(void);
43 int (*multi_timer_check)(int apic, int irq);
44 int (*apicid_to_node)(int logical_apicid);
45 int (*cpu_to_logical_apicid)(int cpu);
46 int (*cpu_present_to_apicid)(int mps_cpu);
47 physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
48 void (*setup_portio_remap)(void);
49 int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
50 void (*enable_apic_mode)(void);
51 int (*phys_pkg_id)(int cpuid_apic, int index_msb);
52
53 /*
54 * When this hook (or acpi_madt_oem_check above) returns 1,
55 * the genapic is switched to the matching driver. Essentially
56 * these are additional probe functions:
57 */
58 int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
59
60 unsigned int (*get_apic_id)(unsigned long x);
61 unsigned long (*set_apic_id)(unsigned int id);
62 unsigned long apic_id_mask;
63
64 unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
65 unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
66 const struct cpumask *andmask);
67
68 /* ipi */
69 void (*send_IPI_mask)(const struct cpumask *mask, int vector);
70 void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
71 int vector);
72 void (*send_IPI_allbutself)(int vector);
73 void (*send_IPI_all)(int vector);
74 void (*send_IPI_self)(int vector);
75
76 /* wakeup_secondary_cpu */
77 int (*wakeup_cpu)(int apicid, unsigned long start_eip);
78
79 int trampoline_phys_low;
80 int trampoline_phys_high;
81
82 void (*wait_for_init_deassert)(atomic_t *deassert);
83 void (*smp_callin_clear_local_apic)(void);
84 void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
85 void (*inquire_remote_apic)(int apicid);
86};
87
88extern struct genapic *apic;
89
90/*
91 * Warm reset vector default position:
92 */
93#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467
94#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469
95
1#ifdef CONFIG_X86_32 96#ifdef CONFIG_X86_32
2# include "genapic_32.h" 97extern void es7000_update_genapic_to_cluster(void);
3#else 98#else
4# include "genapic_64.h" 99extern struct genapic apic_flat;
100extern struct genapic apic_physflat;
101extern struct genapic apic_x2apic_cluster;
102extern struct genapic apic_x2apic_phys;
103extern int default_acpi_madt_oem_check(char *, char *);
104
105extern void apic_send_IPI_self(int vector);
106
107extern struct genapic apic_x2apic_uv_x;
108DECLARE_PER_CPU(int, x2apic_extra_bits);
109
110extern void default_setup_apic_routing(void);
111
112extern int default_cpu_present_to_apicid(int mps_cpu);
113extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
5#endif 114#endif
115
116static inline void default_wait_for_init_deassert(atomic_t *deassert)
117{
118 while (!atomic_read(deassert))
119 cpu_relax();
120 return;
121}
122
123extern void generic_bigsmp_probe(void);
124
125
126#ifdef CONFIG_X86_LOCAL_APIC
127
128#include <asm/smp.h>
129
130#define APIC_DFR_VALUE (APIC_DFR_FLAT)
131
132static inline const struct cpumask *default_target_cpus(void)
133{
134#ifdef CONFIG_SMP
135 return cpu_online_mask;
136#else
137 return cpumask_of(0);
138#endif
139}
140
141DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
142
143
144static inline unsigned int read_apic_id(void)
145{
146 unsigned int reg;
147
148 reg = apic_read(APIC_ID);
149
150 return apic->get_apic_id(reg);
151}
152
153#ifdef CONFIG_X86_64
154extern void default_setup_apic_routing(void);
155#else
156
157/*
158 * Set up the logical destination ID.
159 *
160 * Intel recommends to set DFR, LDR and TPR before enabling
161 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
162 * document number 292116). So here it goes...
163 */
164extern void default_init_apic_ldr(void);
165
166static inline int default_apic_id_registered(void)
167{
168 return physid_isset(read_apic_id(), phys_cpu_present_map);
169}
170
171static inline unsigned int
172default_cpu_mask_to_apicid(const struct cpumask *cpumask)
173{
174 return cpumask_bits(cpumask)[0];
175}
176
177static inline unsigned int
178default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
179 const struct cpumask *andmask)
180{
181 unsigned long mask1 = cpumask_bits(cpumask)[0];
182 unsigned long mask2 = cpumask_bits(andmask)[0];
183 unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
184
185 return (unsigned int)(mask1 & mask2 & mask3);
186}
187
188static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
189{
190 return cpuid_apic >> index_msb;
191}
192
193static inline void default_setup_apic_routing(void)
194{
195#ifdef CONFIG_X86_IO_APIC
196 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
197 "Flat", nr_ioapics);
198#endif
199}
200
201extern int default_apicid_to_node(int logical_apicid);
202
203#endif
204
205static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
206{
207 return physid_isset(apicid, bitmap);
208}
209
210static inline unsigned long default_check_apicid_present(int bit)
211{
212 return physid_isset(bit, phys_cpu_present_map);
213}
214
215static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
216{
217 return phys_map;
218}
219
220/* Mapping from cpu number to logical apicid */
221static inline int default_cpu_to_logical_apicid(int cpu)
222{
223 return 1 << cpu;
224}
225
226static inline int __default_cpu_present_to_apicid(int mps_cpu)
227{
228 if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
229 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
230 else
231 return BAD_APICID;
232}
233
234static inline int
235__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
236{
237 return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
238}
239
240#ifdef CONFIG_X86_32
241static inline int default_cpu_present_to_apicid(int mps_cpu)
242{
243 return __default_cpu_present_to_apicid(mps_cpu);
244}
245
246static inline int
247default_check_phys_apicid_present(int boot_cpu_physical_apicid)
248{
249 return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
250}
251#else
252extern int default_cpu_present_to_apicid(int mps_cpu);
253extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
254#endif
255
256static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
257{
258 return physid_mask_of_physid(phys_apicid);
259}
260
261#endif /* CONFIG_X86_LOCAL_APIC */
262
263#endif /* _ASM_X86_GENAPIC_H */
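
With every hook collected into one struct genapic, a sub-architecture becomes a designated-initializer table plus a probe routine, and the core installs the first driver whose probe (or OEM check) fires. A reduced, hypothetical sketch of that dispatch pattern (all names invented; the real drivers are apic_flat, apic_physflat and friends declared above):

#include <stdio.h>

struct genapic_demo {
        const char *name;
        int (*probe)(void);
};

static int flat_probe(void) { return 1; }       /* always matches */

static struct genapic_demo apic_flat_demo = {
        .name  = "flat",
        .probe = flat_probe,
};

static struct genapic_demo *drivers[] = { &apic_flat_demo };
static struct genapic_demo *apic;               /* the selected driver */

int main(void)
{
        for (unsigned int i = 0; i < sizeof(drivers) / sizeof(drivers[0]); i++)
                if (drivers[i]->probe()) {
                        apic = drivers[i];
                        break;
                }
        printf("selected genapic: %s\n", apic->name);
        return 0;
}
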
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
deleted file mode 100644
index 2c05b737ee22..000000000000
--- a/arch/x86/include/asm/genapic_32.h
+++ /dev/null
@@ -1,148 +0,0 @@
1#ifndef _ASM_X86_GENAPIC_32_H
2#define _ASM_X86_GENAPIC_32_H
3
4#include <asm/mpspec.h>
5#include <asm/atomic.h>
6
7/*
8 * Generic APIC driver interface.
9 *
10 * A straightforward mapping of the APIC-related parts of the
11 * x86 subarchitecture interface to a dynamic object.
12 *
13 * This is used by the "generic" x86 subarchitecture.
14 *
15 * Copyright 2003 Andi Kleen, SuSE Labs.
16 */
17
18struct mpc_bus;
19struct mpc_table;
20struct mpc_cpu;
21
22struct genapic {
23 char *name;
24 int (*probe)(void);
25
26 int (*apic_id_registered)(void);
27 const struct cpumask *(*target_cpus)(void);
28 int int_delivery_mode;
29 int int_dest_mode;
30 int ESR_DISABLE;
31 int apic_destination_logical;
32 unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
33 unsigned long (*check_apicid_present)(int apicid);
34 int no_balance_irq;
35 int no_ioapic_check;
36 void (*init_apic_ldr)(void);
37 physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
38
39 void (*setup_apic_routing)(void);
40 int (*multi_timer_check)(int apic, int irq);
41 int (*apicid_to_node)(int logical_apicid);
42 int (*cpu_to_logical_apicid)(int cpu);
43 int (*cpu_present_to_apicid)(int mps_cpu);
44 physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
45 void (*setup_portio_remap)(void);
46 int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
47 void (*enable_apic_mode)(void);
48 u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
49
50 /* mpparse */
51 /* When one of the next two hooks returns 1 the genapic
52 is switched to this. Essentially they are additional probe
53 functions. */
54 int (*mps_oem_check)(struct mpc_table *mpc, char *oem,
55 char *productid);
56 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
57
58 unsigned (*get_apic_id)(unsigned long x);
59 unsigned long apic_id_mask;
60 unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
61 unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
62 const struct cpumask *andmask);
63 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
64
65#ifdef CONFIG_SMP
66 /* ipi */
67 void (*send_IPI_mask)(const struct cpumask *mask, int vector);
68 void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
69 int vector);
70 void (*send_IPI_allbutself)(int vector);
71 void (*send_IPI_all)(int vector);
72#endif
73 int (*wakeup_cpu)(int apicid, unsigned long start_eip);
74 int trampoline_phys_low;
75 int trampoline_phys_high;
76 void (*wait_for_init_deassert)(atomic_t *deassert);
77 void (*smp_callin_clear_local_apic)(void);
78 void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
79 void (*restore_NMI_vector)(unsigned short *high, unsigned short *low);
80 void (*inquire_remote_apic)(int apicid);
81};
82
83#define APICFUNC(x) .x = x,
84
85/* More functions could probably be marked IPIFUNC and save some space
86 in UP GENERICARCH kernels, but I don't have the nerve right now
87 to untangle this mess. -AK */
88#ifdef CONFIG_SMP
89#define IPIFUNC(x) APICFUNC(x)
90#else
91#define IPIFUNC(x)
92#endif
93
94#define APIC_INIT(aname, aprobe) \
95{ \
96 .name = aname, \
97 .probe = aprobe, \
98 .int_delivery_mode = INT_DELIVERY_MODE, \
99 .int_dest_mode = INT_DEST_MODE, \
100 .no_balance_irq = NO_BALANCE_IRQ, \
101 .ESR_DISABLE = esr_disable, \
102 .apic_destination_logical = APIC_DEST_LOGICAL, \
103 APICFUNC(apic_id_registered) \
104 APICFUNC(target_cpus) \
105 APICFUNC(check_apicid_used) \
106 APICFUNC(check_apicid_present) \
107 APICFUNC(init_apic_ldr) \
108 APICFUNC(ioapic_phys_id_map) \
109 APICFUNC(setup_apic_routing) \
110 APICFUNC(multi_timer_check) \
111 APICFUNC(apicid_to_node) \
112 APICFUNC(cpu_to_logical_apicid) \
113 APICFUNC(cpu_present_to_apicid) \
114 APICFUNC(apicid_to_cpu_present) \
115 APICFUNC(setup_portio_remap) \
116 APICFUNC(check_phys_apicid_present) \
117 APICFUNC(mps_oem_check) \
118 APICFUNC(get_apic_id) \
119 .apic_id_mask = APIC_ID_MASK, \
120 APICFUNC(cpu_mask_to_apicid) \
121 APICFUNC(cpu_mask_to_apicid_and) \
122 APICFUNC(vector_allocation_domain) \
123 APICFUNC(acpi_madt_oem_check) \
124 IPIFUNC(send_IPI_mask) \
125 IPIFUNC(send_IPI_allbutself) \
126 IPIFUNC(send_IPI_all) \
127 APICFUNC(enable_apic_mode) \
128 APICFUNC(phys_pkg_id) \
129 .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, \
130 .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, \
131 APICFUNC(wait_for_init_deassert) \
132 APICFUNC(smp_callin_clear_local_apic) \
133 APICFUNC(store_NMI_vector) \
134 APICFUNC(restore_NMI_vector) \
135 APICFUNC(inquire_remote_apic) \
136}
137
138extern struct genapic *genapic;
139extern void es7000_update_genapic_to_cluster(void);
140
141enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
142#define get_uv_system_type() UV_NONE
143#define is_uv_system() 0
144#define uv_wakeup_secondary(a, b) 1
145#define uv_system_init() do {} while (0)
146
147
148#endif /* _ASM_X86_GENAPIC_32_H */
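
The deleted APICFUNC()/APIC_INIT() helpers work by token pasting into designated initializers: APICFUNC(x) expands to ".x = x,", so a subarch whose functions are named exactly after the hooks can fill its whole table with one macro invocation. A standalone illustration with a cut-down struct (names invented):

#include <stdio.h>

#define APICFUNC(x) .x = x,

struct demo_apic {
        const char *name;
        int (*probe)(void);
};

static int probe(void) { return 1; }

static struct demo_apic demo = {
        .name = "demo",
        APICFUNC(probe)         /* expands to: .probe = probe, */
};

int main(void)
{
        printf("%s probed: %d\n", demo.name, demo.probe());
        return 0;
}
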
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
deleted file mode 100644
index adf32fb56aa6..000000000000
--- a/arch/x86/include/asm/genapic_64.h
+++ /dev/null
@@ -1,66 +0,0 @@
1#ifndef _ASM_X86_GENAPIC_64_H
2#define _ASM_X86_GENAPIC_64_H
3
4#include <linux/cpumask.h>
5
6/*
7 * Copyright 2004 James Cleverdon, IBM.
8 * Subject to the GNU Public License, v.2
9 *
10 * Generic APIC sub-arch data struct.
11 *
12 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
13 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
14 * James Cleverdon.
15 */
16
17struct genapic {
18 char *name;
19 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
20 u32 int_delivery_mode;
21 u32 int_dest_mode;
22 int (*apic_id_registered)(void);
23 const struct cpumask *(*target_cpus)(void);
24 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
25 void (*init_apic_ldr)(void);
26 /* ipi */
27 void (*send_IPI_mask)(const struct cpumask *mask, int vector);
28 void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
29 int vector);
30 void (*send_IPI_allbutself)(int vector);
31 void (*send_IPI_all)(int vector);
32 void (*send_IPI_self)(int vector);
33 /* */
34 unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
35 unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
36 const struct cpumask *andmask);
37 unsigned int (*phys_pkg_id)(int index_msb);
38 unsigned int (*get_apic_id)(unsigned long x);
39 unsigned long (*set_apic_id)(unsigned int id);
40 unsigned long apic_id_mask;
41 /* wakeup_secondary_cpu */
42 int (*wakeup_cpu)(int apicid, unsigned long start_eip);
43};
44
45extern struct genapic *genapic;
46
47extern struct genapic apic_flat;
48extern struct genapic apic_physflat;
49extern struct genapic apic_x2apic_cluster;
50extern struct genapic apic_x2apic_phys;
51extern int acpi_madt_oem_check(char *, char *);
52
53extern void apic_send_IPI_self(int vector);
54enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
55extern enum uv_system_type get_uv_system_type(void);
56extern int is_uv_system(void);
57
58extern struct genapic apic_x2apic_uv_x;
59DECLARE_PER_CPU(int, x2apic_extra_bits);
60extern void uv_cpu_init(void);
61extern void uv_system_init(void);
62extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
63
64extern void setup_apic_routing(void);
65
66#endif /* _ASM_X86_GENAPIC_64_H */
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 000787df66e6..176f058e7159 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -1,11 +1,52 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_HARDIRQ_H
2# include "hardirq_32.h" 2#define _ASM_X86_HARDIRQ_H
3#else 3
4# include "hardirq_64.h" 4#include <linux/threads.h>
5#include <linux/irq.h>
6
7typedef struct {
8 unsigned int __softirq_pending;
9 unsigned int __nmi_count; /* arch dependent */
10 unsigned int irq0_irqs;
11#ifdef CONFIG_X86_LOCAL_APIC
12 unsigned int apic_timer_irqs; /* arch dependent */
13 unsigned int irq_spurious_count;
14#endif
15#ifdef CONFIG_SMP
16 unsigned int irq_resched_count;
17 unsigned int irq_call_count;
18 unsigned int irq_tlb_count;
19#endif
20#ifdef CONFIG_X86_MCE
21 unsigned int irq_thermal_count;
22# ifdef CONFIG_X86_64
23 unsigned int irq_threshold_count;
24# endif
5#endif 25#endif
26} ____cacheline_aligned irq_cpustat_t;
27
28DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
29
30/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
31#define MAX_HARDIRQS_PER_CPU NR_VECTORS
32
33#define __ARCH_IRQ_STAT
34
35#define inc_irq_stat(member) percpu_add(irq_stat.member, 1)
36
37#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
38
39#define __ARCH_SET_SOFTIRQ_PENDING
40
41#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
42#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x))
43
44extern void ack_bad_irq(unsigned int irq);
6 45
7extern u64 arch_irq_stat_cpu(unsigned int cpu); 46extern u64 arch_irq_stat_cpu(unsigned int cpu);
8#define arch_irq_stat_cpu arch_irq_stat_cpu 47#define arch_irq_stat_cpu arch_irq_stat_cpu
9 48
10extern u64 arch_irq_stat(void); 49extern u64 arch_irq_stat(void);
11#define arch_irq_stat arch_irq_stat 50#define arch_irq_stat arch_irq_stat
51
52#endif /* _ASM_X86_HARDIRQ_H */
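
The unified irq_cpustat_t keeps one cacheline-aligned counter block per CPU: inc_irq_stat() bumps the local copy through the new percpu_add(), and arch_irq_stat_cpu() sums a CPU's counters for reporting. A userspace miniature of that layout (illustrative; it stands in for the kernel's real percpu machinery):

#include <stdio.h>

#define NCPUS 4

struct irq_stats {                      /* stands in for irq_cpustat_t */
        unsigned int apic_timer_irqs;
        unsigned int irq_resched_count;
} __attribute__((aligned(64)));         /* ____cacheline_aligned */

static struct irq_stats stats[NCPUS];   /* one block per CPU */

static unsigned long long irq_stat_cpu(int cpu)
{
        return stats[cpu].apic_timer_irqs + stats[cpu].irq_resched_count;
}

int main(void)
{
        stats[0].apic_timer_irqs = 3;   /* as if inc_irq_stat() ran on cpu 0 */
        stats[1].irq_resched_count = 2;

        unsigned long long sum = 0;
        for (int cpu = 0; cpu < NCPUS; cpu++)
                sum += irq_stat_cpu(cpu);
        printf("total irqs: %llu\n", sum);      /* prints 5 */
        return 0;
}
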
diff --git a/arch/x86/include/asm/hardirq_32.h b/arch/x86/include/asm/hardirq_32.h
deleted file mode 100644
index cf7954d1405f..000000000000
--- a/arch/x86/include/asm/hardirq_32.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef _ASM_X86_HARDIRQ_32_H
2#define _ASM_X86_HARDIRQ_32_H
3
4#include <linux/threads.h>
5#include <linux/irq.h>
6
7typedef struct {
8 unsigned int __softirq_pending;
9 unsigned long idle_timestamp;
10 unsigned int __nmi_count; /* arch dependent */
11 unsigned int apic_timer_irqs; /* arch dependent */
12 unsigned int irq0_irqs;
13 unsigned int irq_resched_count;
14 unsigned int irq_call_count;
15 unsigned int irq_tlb_count;
16 unsigned int irq_thermal_count;
17 unsigned int irq_spurious_count;
18} ____cacheline_aligned irq_cpustat_t;
19
20DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
21
22#define __ARCH_IRQ_STAT
23#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
24
25#define inc_irq_stat(member) (__get_cpu_var(irq_stat).member++)
26
27void ack_bad_irq(unsigned int irq);
28#include <linux/irq_cpustat.h>
29
30#endif /* _ASM_X86_HARDIRQ_32_H */
diff --git a/arch/x86/include/asm/hardirq_64.h b/arch/x86/include/asm/hardirq_64.h
deleted file mode 100644
index b5a6b5d56704..000000000000
--- a/arch/x86/include/asm/hardirq_64.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef _ASM_X86_HARDIRQ_64_H
2#define _ASM_X86_HARDIRQ_64_H
3
4#include <linux/threads.h>
5#include <linux/irq.h>
6#include <asm/pda.h>
7#include <asm/apic.h>
8
9/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
10#define MAX_HARDIRQS_PER_CPU NR_VECTORS
11
12#define __ARCH_IRQ_STAT 1
13
14#define inc_irq_stat(member) add_pda(member, 1)
15
16#define local_softirq_pending() read_pda(__softirq_pending)
17
18#define __ARCH_SET_SOFTIRQ_PENDING 1
19
20#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
21#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
22
23extern void ack_bad_irq(unsigned int irq);
24
25#endif /* _ASM_X86_HARDIRQ_64_H */
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 8de644b6b959..370e1c83bb49 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -25,8 +25,6 @@
25#include <asm/irq.h> 25#include <asm/irq.h>
26#include <asm/sections.h> 26#include <asm/sections.h>
27 27
28#define platform_legacy_irq(irq) ((irq) < 16)
29
30/* Interrupt handlers registered during init_IRQ */ 28/* Interrupt handlers registered during init_IRQ */
31extern void apic_timer_interrupt(void); 29extern void apic_timer_interrupt(void);
32extern void error_interrupt(void); 30extern void error_interrupt(void);
@@ -58,7 +56,7 @@ extern void make_8259A_irq(unsigned int irq);
58extern void init_8259A(int aeoi); 56extern void init_8259A(int aeoi);
59 57
60/* IOAPIC */ 58/* IOAPIC */
61#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) 59#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
62extern unsigned long io_apic_irqs; 60extern unsigned long io_apic_irqs;
63 61
64extern void init_VISWS_APIC_irqs(void); 62extern void init_VISWS_APIC_irqs(void);
@@ -67,15 +65,7 @@ extern void disable_IO_APIC(void);
67extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); 65extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
68extern void setup_ioapic_dest(void); 66extern void setup_ioapic_dest(void);
69 67
70#ifdef CONFIG_X86_64
71extern void enable_IO_APIC(void); 68extern void enable_IO_APIC(void);
72#endif
73
74/* IPI functions */
75#ifdef CONFIG_X86_32
76extern void send_IPI_self(int vector);
77#endif
78extern void send_IPI(int dest, int vector);
79 69
80/* Statistics */ 70/* Statistics */
81extern atomic_t irq_err_count; 71extern atomic_t irq_err_count;
@@ -84,21 +74,11 @@ extern atomic_t irq_mis_count;
84/* EISA */ 74/* EISA */
85extern void eisa_set_level_irq(unsigned int irq); 75extern void eisa_set_level_irq(unsigned int irq);
86 76
87/* Voyager functions */
88extern asmlinkage void vic_cpi_interrupt(void);
89extern asmlinkage void vic_sys_interrupt(void);
90extern asmlinkage void vic_cmn_interrupt(void);
91extern asmlinkage void qic_timer_interrupt(void);
92extern asmlinkage void qic_invalidate_interrupt(void);
93extern asmlinkage void qic_reschedule_interrupt(void);
94extern asmlinkage void qic_enable_irq_interrupt(void);
95extern asmlinkage void qic_call_function_interrupt(void);
96
97/* SMP */ 77/* SMP */
98extern void smp_apic_timer_interrupt(struct pt_regs *); 78extern void smp_apic_timer_interrupt(struct pt_regs *);
99extern void smp_spurious_interrupt(struct pt_regs *); 79extern void smp_spurious_interrupt(struct pt_regs *);
100extern void smp_error_interrupt(struct pt_regs *); 80extern void smp_error_interrupt(struct pt_regs *);
101#ifdef CONFIG_X86_SMP 81#ifdef CONFIG_SMP
102extern void smp_reschedule_interrupt(struct pt_regs *); 82extern void smp_reschedule_interrupt(struct pt_regs *);
103extern void smp_call_function_interrupt(struct pt_regs *); 83extern void smp_call_function_interrupt(struct pt_regs *);
104extern void smp_call_function_single_interrupt(struct pt_regs *); 84extern void smp_call_function_single_interrupt(struct pt_regs *);
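
The IO_APIC_IRQ() change above replaces the magic 16 with NR_IRQS_LEGACY: any IRQ at or above the legacy range is always an IO-APIC interrupt, while a legacy IRQ qualifies only if its bit is set in io_apic_irqs. A quick standalone check of that predicate (the io_apic_irqs value is made up):

#include <stdio.h>

#define NR_IRQS_LEGACY 16
static unsigned long io_apic_irqs = 1UL << 4;   /* pretend only IRQ4 is routed */

#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1UL << (x)) & io_apic_irqs))

int main(void)
{
        printf("irq 4:  %d\n", IO_APIC_IRQ(4));         /* 1: bit set */
        printf("irq 5:  %d\n", IO_APIC_IRQ(5));         /* 0: legacy, not routed */
        printf("irq 42: %d\n", IO_APIC_IRQ(42));        /* 1: above legacy range */
        return 0;
}
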
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 1dbbdf4be9b4..e5a2ab44cd5c 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -5,6 +5,7 @@
5 5
6#include <linux/compiler.h> 6#include <linux/compiler.h>
7#include <asm-generic/int-ll64.h> 7#include <asm-generic/int-ll64.h>
8#include <asm/page.h>
8 9
9#define build_mmio_read(name, size, type, reg, barrier) \ 10#define build_mmio_read(name, size, type, reg, barrier) \
10static inline type name(const volatile void __iomem *addr) \ 11static inline type name(const volatile void __iomem *addr) \
@@ -80,6 +81,95 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
80#define readq readq 81#define readq readq
81#define writeq writeq 82#define writeq writeq
82 83
84/**
85 * virt_to_phys - map virtual addresses to physical
86 * @address: address to remap
87 *
88 * The returned physical address is the physical (CPU) mapping for
89 * the memory address given. It is only valid to use this function on
90 * addresses directly mapped or allocated via kmalloc.
91 *
92 * This function does not give bus mappings for DMA transfers. In
93 * almost all conceivable cases a device driver should not be using
94 * this function
95 */
96
97static inline phys_addr_t virt_to_phys(volatile void *address)
98{
99 return __pa(address);
100}
101
102/**
103 * phys_to_virt - map physical address to virtual
104 * @address: address to remap
105 *
106 * The returned virtual address is a current CPU mapping for
107 * the memory address given. It is only valid to use this function on
108 * addresses that have a kernel mapping
109 *
110 * This function does not handle bus mappings for DMA transfers. In
111 * almost all conceivable cases a device driver should not be using
112 * this function
113 */
114
115static inline void *phys_to_virt(phys_addr_t address)
116{
117 return __va(address);
118}
119
120/*
121 * Change "struct page" to physical address.
122 */
123#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
124
125/*
126 * ISA I/O bus memory addresses are 1:1 with the physical address.
127 */
128#define isa_virt_to_bus virt_to_phys
129#define isa_page_to_bus page_to_phys
130#define isa_bus_to_virt phys_to_virt
131
132/*
133 * However, PCI ones are not necessarily 1:1 and therefore these interfaces
134 * are forbidden in portable PCI drivers.
135 *
136 * Allow them on x86 for legacy drivers, though.
137 */
138#define virt_to_bus virt_to_phys
139#define bus_to_virt phys_to_virt
140
141/**
142 * ioremap - map bus memory into CPU space
143 * @offset: bus address of the memory
144 * @size: size of the resource to map
145 *
146 * ioremap performs a platform specific sequence of operations to
147 * make bus memory CPU accessible via the readb/readw/readl/writeb/
148 * writew/writel functions and the other mmio helpers. The returned
149 * address is not guaranteed to be usable directly as a virtual
150 * address.
151 *
152 * If the area you are trying to map is a PCI BAR you should have a
153 * look at pci_iomap().
154 */
155extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
156extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
157extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
158 unsigned long prot_val);
159
160/*
161 * The default ioremap() behavior is non-cached:
162 */
163static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
164{
165 return ioremap_nocache(offset, size);
166}
167
168extern void iounmap(volatile void __iomem *addr);
169
170extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
171
172
83#ifdef CONFIG_X86_32 173#ifdef CONFIG_X86_32
84# include "io_32.h" 174# include "io_32.h"
85#else 175#else
@@ -91,7 +181,7 @@ extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
91 181
92extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, 182extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
93 unsigned long prot_val); 183 unsigned long prot_val);
94extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); 184extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
95 185
96/* 186/*
97 * early_ioremap() and early_iounmap() are for temporary early boot-time 187 * early_ioremap() and early_iounmap() are for temporary early boot-time
@@ -105,5 +195,6 @@ extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
105extern void early_iounmap(void __iomem *addr, unsigned long size); 195extern void early_iounmap(void __iomem *addr, unsigned long size);
106extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); 196extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
107 197
198#define IO_SPACE_LIMIT 0xffff
108 199
109#endif /* _ASM_X86_IO_H */ 200#endif /* _ASM_X86_IO_H */
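
The ioremap() family documented above is now shared by both word sizes from io.h. A hedged, driver-style usage sketch (the base address, size and register offsets are invented; a real PCI driver would take them from a BAR, e.g. via pci_iomap()):

#include <linux/errno.h>
#include <linux/io.h>

static int demo_map_regs(void)
{
        void __iomem *regs;

        regs = ioremap(0xfebf0000, 0x1000);     /* default mapping is uncached */
        if (!regs)
                return -ENOMEM;

        writel(0x1, regs + 0x04);               /* hypothetical enable register */
        (void)readl(regs + 0x00);               /* read back to flush the posted write */

        iounmap(regs);
        return 0;
}
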
diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
index d8e242e1b396..a299900f5920 100644
--- a/arch/x86/include/asm/io_32.h
+++ b/arch/x86/include/asm/io_32.h
@@ -37,8 +37,6 @@
37 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> 37 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
38 */ 38 */
39 39
40#define IO_SPACE_LIMIT 0xffff
41
42#define XQUAD_PORTIO_BASE 0xfe400000 40#define XQUAD_PORTIO_BASE 0xfe400000
43#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ 41#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
44 42
@@ -53,92 +51,6 @@
53 */ 51 */
54#define xlate_dev_kmem_ptr(p) p 52#define xlate_dev_kmem_ptr(p) p
55 53
56/**
57 * virt_to_phys - map virtual addresses to physical
58 * @address: address to remap
59 *
60 * The returned physical address is the physical (CPU) mapping for
61 * the memory address given. It is only valid to use this function on
62 * addresses directly mapped or allocated via kmalloc.
63 *
64 * This function does not give bus mappings for DMA transfers. In
65 * almost all conceivable cases a device driver should not be using
66 * this function
67 */
68
69static inline unsigned long virt_to_phys(volatile void *address)
70{
71 return __pa(address);
72}
73
74/**
75 * phys_to_virt - map physical address to virtual
76 * @address: address to remap
77 *
78 * The returned virtual address is a current CPU mapping for
79 * the memory address given. It is only valid to use this function on
80 * addresses that have a kernel mapping
81 *
82 * This function does not handle bus mappings for DMA transfers. In
83 * almost all conceivable cases a device driver should not be using
84 * this function
85 */
86
87static inline void *phys_to_virt(unsigned long address)
88{
89 return __va(address);
90}
91
92/*
93 * Change "struct page" to physical address.
94 */
95#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
96
97/**
98 * ioremap - map bus memory into CPU space
99 * @offset: bus address of the memory
100 * @size: size of the resource to map
101 *
102 * ioremap performs a platform specific sequence of operations to
103 * make bus memory CPU accessible via the readb/readw/readl/writeb/
104 * writew/writel functions and the other mmio helpers. The returned
105 * address is not guaranteed to be usable directly as a virtual
106 * address.
107 *
108 * If the area you are trying to map is a PCI BAR you should have a
109 * look at pci_iomap().
110 */
111extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
112extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
113extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
114 unsigned long prot_val);
115
116/*
117 * The default ioremap() behavior is non-cached:
118 */
119static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
120{
121 return ioremap_nocache(offset, size);
122}
123
124extern void iounmap(volatile void __iomem *addr);
125
126/*
127 * ISA I/O bus memory addresses are 1:1 with the physical address.
128 */
129#define isa_virt_to_bus virt_to_phys
130#define isa_page_to_bus page_to_phys
131#define isa_bus_to_virt phys_to_virt
132
133/*
135 * However, PCI ones are not necessarily 1:1 and therefore these interfaces
135 * are forbidden in portable PCI drivers.
136 *
137 * Allow them on x86 for legacy drivers, though.
138 */
139#define virt_to_bus virt_to_phys
140#define bus_to_virt phys_to_virt
141
142static inline void 54static inline void
143memset_io(volatile void __iomem *addr, unsigned char val, int count) 55memset_io(volatile void __iomem *addr, unsigned char val, int count)
144{ 56{
diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
index 563c16270ba6..244067893af4 100644
--- a/arch/x86/include/asm/io_64.h
+++ b/arch/x86/include/asm/io_64.h
@@ -136,73 +136,12 @@ __OUTS(b)
136__OUTS(w) 136__OUTS(w)
137__OUTS(l) 137__OUTS(l)
138 138
139#define IO_SPACE_LIMIT 0xffff
140
141#if defined(__KERNEL__) && defined(__x86_64__) 139#if defined(__KERNEL__) && defined(__x86_64__)
142 140
143#include <linux/vmalloc.h> 141#include <linux/vmalloc.h>
144 142
145#ifndef __i386__
146/*
147 * Change virtual addresses to physical addresses and vice versa.
148 * These are pretty trivial
149 */
150static inline unsigned long virt_to_phys(volatile void *address)
151{
152 return __pa(address);
153}
154
155static inline void *phys_to_virt(unsigned long address)
156{
157 return __va(address);
158}
159#endif
160
161/*
162 * Change "struct page" to physical address.
163 */
164#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
165
166#include <asm-generic/iomap.h> 143#include <asm-generic/iomap.h>
167 144
168/*
169 * This one maps high address device memory and turns off caching for that area.
170 * it's useful if some control registers are in such an area and write combining
171 * or read caching is not desirable:
172 */
173extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
174extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
175extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
176 unsigned long prot_val);
177
178/*
179 * The default ioremap() behavior is non-cached:
180 */
181static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
182{
183 return ioremap_nocache(offset, size);
184}
185
186extern void iounmap(volatile void __iomem *addr);
187
188extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
189
190/*
191 * ISA I/O bus memory addresses are 1:1 with the physical address.
192 */
193#define isa_virt_to_bus virt_to_phys
194#define isa_page_to_bus page_to_phys
195#define isa_bus_to_virt phys_to_virt
196
197/*
199 * However, PCI ones are not necessarily 1:1 and therefore these interfaces
199 * are forbidden in portable PCI drivers.
200 *
201 * Allow them on x86 for legacy drivers, though.
202 */
203#define virt_to_bus virt_to_phys
204#define bus_to_virt phys_to_virt
205
206void __memcpy_fromio(void *, unsigned long, unsigned); 145void __memcpy_fromio(void *, unsigned long, unsigned);
207void __memcpy_toio(unsigned long, const void *, unsigned); 146void __memcpy_toio(unsigned long, const void *, unsigned);
208 147
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 7a1f44ac1f17..59cb4a1317b7 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -114,38 +114,16 @@ struct IR_IO_APIC_route_entry {
114extern int nr_ioapics; 114extern int nr_ioapics;
115extern int nr_ioapic_registers[MAX_IO_APICS]; 115extern int nr_ioapic_registers[MAX_IO_APICS];
116 116
117/*
118 * MP-BIOS irq configuration table structures:
119 */
120
121#define MP_MAX_IOAPIC_PIN 127 117#define MP_MAX_IOAPIC_PIN 127
122 118
123struct mp_config_ioapic {
124 unsigned long mp_apicaddr;
125 unsigned int mp_apicid;
126 unsigned char mp_type;
127 unsigned char mp_apicver;
128 unsigned char mp_flags;
129};
130
131struct mp_config_intsrc {
132 unsigned int mp_dstapic;
133 unsigned char mp_type;
134 unsigned char mp_irqtype;
135 unsigned short mp_irqflag;
136 unsigned char mp_srcbus;
137 unsigned char mp_srcbusirq;
138 unsigned char mp_dstirq;
139};
140
141/* I/O APIC entries */ 119/* I/O APIC entries */
142extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS]; 120extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
143 121
144/* # of MP IRQ source entries */ 122/* # of MP IRQ source entries */
145extern int mp_irq_entries; 123extern int mp_irq_entries;
146 124
147/* MP IRQ source entries */ 125/* MP IRQ source entries */
148extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; 126extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
149 127
150/* non-0 if default (table-less) MP configuration */ 128/* non-0 if default (table-less) MP configuration */
151extern int mpc_default_type; 129extern int mpc_default_type;
@@ -165,15 +143,6 @@ extern int noioapicreroute;
165/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ 143/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
166extern int timer_through_8259; 144extern int timer_through_8259;
167 145
168static inline void disable_ioapic_setup(void)
169{
170#ifdef CONFIG_PCI
171 noioapicquirk = 1;
172 noioapicreroute = -1;
173#endif
174 skip_ioapic_setup = 1;
175}
176
177/* 146/*
178 * If we use the IO-APIC for IRQ routing, disable automatic 147 * If we use the IO-APIC for IRQ routing, disable automatic
179 * assignment of PCI IRQ's. 148 * assignment of PCI IRQ's.
@@ -200,6 +169,12 @@ extern void reinit_intr_remapped_IO_APIC(int);
200 169
201extern void probe_nr_irqs_gsi(void); 170extern void probe_nr_irqs_gsi(void);
202 171
172extern int setup_ioapic_entry(int apic, int irq,
173 struct IO_APIC_route_entry *entry,
174 unsigned int destination, int trigger,
175 int polarity, int vector);
176extern void ioapic_write_entry(int apic, int pin,
177 struct IO_APIC_route_entry e);
203#else /* !CONFIG_X86_IO_APIC */ 178#else /* !CONFIG_X86_IO_APIC */
204#define io_apic_assign_pci_irqs 0 179#define io_apic_assign_pci_irqs 0
205static const int timer_through_8259 = 0; 180static const int timer_through_8259 = 0;
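
The private mp_config_ioapic/mp_config_intsrc mirrors could be dropped because the MP-spec table entries (struct mpc_ioapic, struct mpc_intsrc) already carry the same fields. A standalone sketch of the I/O APIC entry as the MP 1.4 spec lays it out (treat the struct as illustrative, not a copy of the kernel header):

#include <stdio.h>
#include <stdint.h>

struct mpc_ioapic_demo {        /* MP-spec I/O APIC entry, in miniature */
        uint8_t  type;          /* entry type (2 = I/O APIC) */
        uint8_t  apicid;
        uint8_t  apicver;
        uint8_t  flags;         /* bit 0: usable */
        uint32_t apicaddr;      /* MMIO base of this IO-APIC */
};

int main(void)
{
        struct mpc_ioapic_demo io = { 2, 0, 0x20, 1, 0xfec00000 };

        if (io.flags & 1)
                printf("ioapic %u at 0x%x\n", io.apicid, (unsigned int)io.apicaddr);
        return 0;
}
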
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index c745a306f7d3..5f2efc5d9927 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_X86_IPI_H 1#ifndef _ASM_X86_IPI_H
2#define _ASM_X86_IPI_H 2#define _ASM_X86_IPI_H
3 3
4#ifdef CONFIG_X86_LOCAL_APIC
5
4/* 6/*
5 * Copyright 2004 James Cleverdon, IBM. 7 * Copyright 2004 James Cleverdon, IBM.
6 * Subject to the GNU Public License, v.2 8 * Subject to the GNU Public License, v.2
@@ -55,8 +57,8 @@ static inline void __xapic_wait_icr_idle(void)
55 cpu_relax(); 57 cpu_relax();
56} 58}
57 59
58static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, 60static inline void
59 unsigned int dest) 61__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
60{ 62{
61 /* 63 /*
62 * Subtle. In the case of the 'never do double writes' workaround 64 * Subtle. In the case of the 'never do double writes' workaround
@@ -87,8 +89,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
87 * This is used to send an IPI with no shorthand notation (the destination is 89 * This is used to send an IPI with no shorthand notation (the destination is
88 * specified in bits 56 to 63 of the ICR). 90 * specified in bits 56 to 63 of the ICR).
89 */ 91 */
90static inline void __send_IPI_dest_field(unsigned int mask, int vector, 92static inline void
91 unsigned int dest) 93 __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
92{ 94{
93 unsigned long cfg; 95 unsigned long cfg;
94 96
@@ -117,41 +119,46 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
117 native_apic_mem_write(APIC_ICR, cfg); 119 native_apic_mem_write(APIC_ICR, cfg);
118} 120}
119 121
120static inline void send_IPI_mask_sequence(const struct cpumask *mask, 122extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
121 int vector) 123 int vector);
122{ 124extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
123 unsigned long flags; 125 int vector);
124 unsigned long query_cpu; 126#include <asm/genapic.h>
125 127
126 /* 128extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
127 * Hack. The clustered APIC addressing mode doesn't allow us to send 129 int vector);
128 * to an arbitrary mask, so I do a unicast to each CPU instead. 130extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
129 * - mbligh 131 int vector);
130 */ 132
131 local_irq_save(flags); 133/* Avoid include hell */
132 for_each_cpu(query_cpu, mask) { 134#define NMI_VECTOR 0x02
133 __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), 135
134 vector, APIC_DEST_PHYSICAL); 136extern int no_broadcast;
135 } 137
136 local_irq_restore(flags); 138static inline void __default_local_send_IPI_allbutself(int vector)
139{
140 if (no_broadcast || vector == NMI_VECTOR)
141 apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
142 else
143 __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
137} 144}
138 145
139static inline void send_IPI_mask_allbutself(const struct cpumask *mask, 146static inline void __default_local_send_IPI_all(int vector)
140 int vector)
141{ 147{
142 unsigned long flags; 148 if (no_broadcast || vector == NMI_VECTOR)
143 unsigned int query_cpu; 149 apic->send_IPI_mask(cpu_online_mask, vector);
144 unsigned int this_cpu = smp_processor_id(); 150 else
145 151 __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
146 /* See Hack comment above */
147
148 local_irq_save(flags);
149 for_each_cpu(query_cpu, mask)
150 if (query_cpu != this_cpu)
151 __send_IPI_dest_field(
152 per_cpu(x86_cpu_to_apicid, query_cpu),
153 vector, APIC_DEST_PHYSICAL);
154 local_irq_restore(flags);
155} 152}
156 153
154#ifdef CONFIG_X86_32
155extern void default_send_IPI_mask_logical(const struct cpumask *mask,
156 int vector);
157extern void default_send_IPI_allbutself(int vector);
158extern void default_send_IPI_all(int vector);
159extern void default_send_IPI_self(int vector);
160#endif
161
162#endif
163
157#endif /* _ASM_X86_IPI_H */ 164#endif /* _ASM_X86_IPI_H */
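
The open-coded loops removed from this header (the "unicast to each CPU" workaround for clustered APIC addressing) move out of line as default_send_IPI_mask_sequence_phys()/_logical(). The idea in miniature, as a standalone sketch (printf stands in for programming the ICR):

#include <stdio.h>

static void send_unicast(int cpu, int vector)
{
        printf("IPI vector 0x%x -> cpu %d\n", vector, cpu);
}

/* When the hardware cannot target an arbitrary CPU mask,
 * send one unicast per set bit instead. */
static void send_ipi_mask_sequence(unsigned long mask, int vector)
{
        for (int cpu = 0; mask; cpu++, mask >>= 1)
                if (mask & 1)
                        send_unicast(cpu, vector);
}

int main(void)
{
        send_ipi_mask_sequence(0xb, 0xfd);      /* cpus 0, 1 and 3 */
        return 0;
}
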
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 592688ed04d3..107eb2196691 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -36,9 +36,11 @@ static inline int irq_canonicalize(int irq)
36extern void fixup_irqs(void); 36extern void fixup_irqs(void);
37#endif 37#endif
38 38
39extern unsigned int do_IRQ(struct pt_regs *regs);
40extern void init_IRQ(void); 39extern void init_IRQ(void);
41extern void native_init_IRQ(void); 40extern void native_init_IRQ(void);
41extern bool handle_irq(unsigned irq, struct pt_regs *regs);
42
43extern unsigned int do_IRQ(struct pt_regs *regs);
42 44
43/* Interrupt vector management */ 45/* Interrupt vector management */
44extern DECLARE_BITMAP(used_vectors, NR_VECTORS); 46extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
diff --git a/arch/x86/include/asm/irq_regs.h b/arch/x86/include/asm/irq_regs.h
index 89c898ab298b..77843225b7ea 100644
--- a/arch/x86/include/asm/irq_regs.h
+++ b/arch/x86/include/asm/irq_regs.h
@@ -1,5 +1,31 @@
1#ifdef CONFIG_X86_32 1/*
2# include "irq_regs_32.h" 2 * Per-cpu current frame pointer - the location of the last exception frame on
3#else 3 * the stack, stored in the per-cpu area.
4# include "irq_regs_64.h" 4 *
5#endif 5 * Jeremy Fitzhardinge <jeremy@goop.org>
6 */
7#ifndef _ASM_X86_IRQ_REGS_H
8#define _ASM_X86_IRQ_REGS_H
9
10#include <asm/percpu.h>
11
12#define ARCH_HAS_OWN_IRQ_REGS
13
14DECLARE_PER_CPU(struct pt_regs *, irq_regs);
15
16static inline struct pt_regs *get_irq_regs(void)
17{
18 return percpu_read(irq_regs);
19}
20
21static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
22{
23 struct pt_regs *old_regs;
24
25 old_regs = get_irq_regs();
26 percpu_write(irq_regs, new_regs);
27
28 return old_regs;
29}
30
31#endif /* _ASM_X86_IRQ_REGS_H */
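
The unified header keeps the usual save/install/restore idiom around interrupt entry, now via percpu_read()/percpu_write(). A minimal sketch of how an entry path uses it (the demo function is invented; do_IRQ() is the real caller):

#include <asm/irq_regs.h>

static void demo_irq_entry(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        /* ... handle the interrupt; profilers can peek at get_irq_regs() ... */

        set_irq_regs(old_regs);         /* restore for nested entries */
}
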
diff --git a/arch/x86/include/asm/irq_regs_32.h b/arch/x86/include/asm/irq_regs_32.h
deleted file mode 100644
index 86afd7473457..000000000000
--- a/arch/x86/include/asm/irq_regs_32.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Per-cpu current frame pointer - the location of the last exception frame on
3 * the stack, stored in the per-cpu area.
4 *
5 * Jeremy Fitzhardinge <jeremy@goop.org>
6 */
7#ifndef _ASM_X86_IRQ_REGS_32_H
8#define _ASM_X86_IRQ_REGS_32_H
9
10#include <asm/percpu.h>
11
12#define ARCH_HAS_OWN_IRQ_REGS
13
14DECLARE_PER_CPU(struct pt_regs *, irq_regs);
15
16static inline struct pt_regs *get_irq_regs(void)
17{
18 return x86_read_percpu(irq_regs);
19}
20
21static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
22{
23 struct pt_regs *old_regs;
24
25 old_regs = get_irq_regs();
26 x86_write_percpu(irq_regs, new_regs);
27
28 return old_regs;
29}
30
31#endif /* _ASM_X86_IRQ_REGS_32_H */
diff --git a/arch/x86/include/asm/irq_regs_64.h b/arch/x86/include/asm/irq_regs_64.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/x86/include/asm/irq_regs_64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/irq_regs.h>
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index f7ff65032b9d..b07278c55e9e 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -1,47 +1,69 @@
1#ifndef _ASM_X86_IRQ_VECTORS_H 1#ifndef _ASM_X86_IRQ_VECTORS_H
2#define _ASM_X86_IRQ_VECTORS_H 2#define _ASM_X86_IRQ_VECTORS_H
3 3
4#include <linux/threads.h> 4/*
5 * Linux IRQ vector layout.
6 *
7 * There are 256 IDT entries (per CPU - each entry is 8 bytes) which can
8 * be defined by Linux. They are used as a jump table by the CPU when a
9 * given vector is triggered - by a CPU-external, CPU-internal or
10 * software-triggered event.
11 *
12 * Linux sets the kernel code address each entry jumps to early during
13 * bootup, and never changes them. This is the general layout of the
14 * IDT entries:
15 *
16 * Vectors 0 ... 31 : system traps and exceptions - hardcoded events
17 * Vectors 32 ... 127 : device interrupts
18 * Vector 128 : legacy int80 syscall interface
19 * Vectors 129 ... 237 : device interrupts
20 * Vectors 238 ... 255 : special interrupts
21 *
22 * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
23 *
24 * This file enumerates their exact layout:
25 */
5 26
6#define NMI_VECTOR 0x02 27#define NMI_VECTOR 0x02
7 28
8/* 29/*
9 * IDT vectors usable for external interrupt sources start 30 * IDT vectors usable for external interrupt sources start
10 * at 0x20: 31 * at 0x20:
11 */ 32 */
12#define FIRST_EXTERNAL_VECTOR 0x20 33#define FIRST_EXTERNAL_VECTOR 0x20
13 34
14#ifdef CONFIG_X86_32 35#ifdef CONFIG_X86_32
15# define SYSCALL_VECTOR 0x80 36# define SYSCALL_VECTOR 0x80
16#else 37#else
17# define IA32_SYSCALL_VECTOR 0x80 38# define IA32_SYSCALL_VECTOR 0x80
18#endif 39#endif
19 40
20/* 41/*
21 * Reserve the lowest usable priority level 0x20 - 0x2f for triggering 42 * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
22 * cleanup after irq migration. 43 * cleanup after irq migration.
23 */ 44 */
24#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR 45#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
25 46
26/* 47/*
27 * Vectors 0x30-0x3f are used for ISA interrupts. 48 * Vectors 0x30-0x3f are used for ISA interrupts.
28 */ 49 */
29#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10) 50#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
30#define IRQ1_VECTOR (IRQ0_VECTOR + 1) 51
31#define IRQ2_VECTOR (IRQ0_VECTOR + 2) 52#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
32#define IRQ3_VECTOR (IRQ0_VECTOR + 3) 53#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
33#define IRQ4_VECTOR (IRQ0_VECTOR + 4) 54#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
34#define IRQ5_VECTOR (IRQ0_VECTOR + 5) 55#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
35#define IRQ6_VECTOR (IRQ0_VECTOR + 6) 56#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
36#define IRQ7_VECTOR (IRQ0_VECTOR + 7) 57#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
37#define IRQ8_VECTOR (IRQ0_VECTOR + 8) 58#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
38#define IRQ9_VECTOR (IRQ0_VECTOR + 9) 59#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
39#define IRQ10_VECTOR (IRQ0_VECTOR + 10) 60#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
40#define IRQ11_VECTOR (IRQ0_VECTOR + 11) 61#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
41#define IRQ12_VECTOR (IRQ0_VECTOR + 12) 62#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
42#define IRQ13_VECTOR (IRQ0_VECTOR + 13) 63#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
43#define IRQ14_VECTOR (IRQ0_VECTOR + 14) 64#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
44#define IRQ15_VECTOR (IRQ0_VECTOR + 15) 65#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
66#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
45 67
46/* 68/*
47 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff 69 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
@@ -49,119 +71,98 @@
49 * some of the following vectors are 'rare', they are merged 71 * some of the following vectors are 'rare', they are merged
50 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. 72 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
51 * TLB, reschedule and local APIC vectors are performance-critical. 73 * TLB, reschedule and local APIC vectors are performance-critical.
52 *
53 * Vectors 0xf0-0xfa are free (reserved for future Linux use).
54 */ 74 */
55#ifdef CONFIG_X86_32
56
57# define SPURIOUS_APIC_VECTOR 0xff
58# define ERROR_APIC_VECTOR 0xfe
59# define INVALIDATE_TLB_VECTOR 0xfd
60# define RESCHEDULE_VECTOR 0xfc
61# define CALL_FUNCTION_VECTOR 0xfb
62# define CALL_FUNCTION_SINGLE_VECTOR 0xfa
63# define THERMAL_APIC_VECTOR 0xf0
64
65#else
66 75
67#define SPURIOUS_APIC_VECTOR 0xff 76#define SPURIOUS_APIC_VECTOR 0xff
77/*
78 * Sanity check
79 */
80#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
81# error SPURIOUS_APIC_VECTOR definition error
82#endif
83
68#define ERROR_APIC_VECTOR 0xfe 84#define ERROR_APIC_VECTOR 0xfe
69#define RESCHEDULE_VECTOR 0xfd 85#define RESCHEDULE_VECTOR 0xfd
70#define CALL_FUNCTION_VECTOR 0xfc 86#define CALL_FUNCTION_VECTOR 0xfc
71#define CALL_FUNCTION_SINGLE_VECTOR 0xfb 87#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
72#define THERMAL_APIC_VECTOR 0xfa 88#define THERMAL_APIC_VECTOR 0xfa
73#define THRESHOLD_APIC_VECTOR 0xf9
74#define UV_BAU_MESSAGE 0xf8
75#define INVALIDATE_TLB_VECTOR_END 0xf7
76#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
77
78#define NUM_INVALIDATE_TLB_VECTORS 8
79 89
90#ifdef CONFIG_X86_32
91/* 0xf8 - 0xf9 : free */
92#else
93# define THRESHOLD_APIC_VECTOR 0xf9
94# define UV_BAU_MESSAGE 0xf8
80#endif 95#endif
81 96
97/* f0-f7 used for spreading out TLB flushes: */
98#define INVALIDATE_TLB_VECTOR_END 0xf7
99#define INVALIDATE_TLB_VECTOR_START 0xf0
100#define NUM_INVALIDATE_TLB_VECTORS 8
101
82/* 102/*
83 * Local APIC timer IRQ vector is on a different priority level, 103 * Local APIC timer IRQ vector is on a different priority level,
84 * to work around the 'lost local interrupt if more than 2 IRQ 104 * to work around the 'lost local interrupt if more than 2 IRQ
85 * sources per level' errata. 105 * sources per level' errata.
86 */ 106 */
87#define LOCAL_TIMER_VECTOR 0xef 107#define LOCAL_TIMER_VECTOR 0xef
108
109/*
110 * Performance monitoring interrupt vector:
111 */
112#define LOCAL_PERF_VECTOR 0xee
88 113
89/* 114/*
90 * First APIC vector available to drivers: (vectors 0x30-0xee) we 115 * First APIC vector available to drivers: (vectors 0x30-0xee) we
91 * start at 0x31(0x41) to spread out vectors evenly between priority 116 * start at 0x31(0x41) to spread out vectors evenly between priority
92 * levels. (0x80 is the syscall vector) 117 * levels. (0x80 is the syscall vector)
93 */ 118 */
94#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2) 119#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
95
96#define NR_VECTORS 256
97 120
98#define FPU_IRQ 13 121#define NR_VECTORS 256
99 122
100#define FIRST_VM86_IRQ 3 123#define FPU_IRQ 13
101#define LAST_VM86_IRQ 15
102#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
103 124
104#define NR_IRQS_LEGACY 16 125#define FIRST_VM86_IRQ 3
126#define LAST_VM86_IRQ 15
105 127
106#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER) 128#ifndef __ASSEMBLY__
107 129static inline int invalid_vm86_irq(int irq)
108#ifndef CONFIG_SPARSE_IRQ 130{
109# if NR_CPUS < MAX_IO_APICS 131 return irq < 3 || irq > 15;
110# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) 132}
111# else
112# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
113# endif
114#else
115# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
116# define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
117# else
118# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
119# endif
120#endif 133#endif
121 134
122#elif defined(CONFIG_X86_VOYAGER) 135/*
123 136 * Size the maximum number of interrupts.
124# define NR_IRQS 224 137 *
138 * If the irq_desc[] array has a sparse layout, we can size things
139 * generously - it scales up linearly with the maximum number of CPUs,
140 * and the maximum number of IO-APICs, whichever is higher.
141 *
142 * In other cases we size more conservatively, to not create too large
143 * static arrays.
144 */
125 145
126#else /* IO_APIC || VOYAGER */ 146#define NR_IRQS_LEGACY 16
127 147
128# define NR_IRQS 16 148#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS )
149#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS )
129 150
151#ifdef CONFIG_X86_IO_APIC
152# ifdef CONFIG_SPARSE_IRQ
153# define NR_IRQS \
154 (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
155 (NR_VECTORS + CPU_VECTOR_LIMIT) : \
156 (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
157# else
158# if NR_CPUS < MAX_IO_APICS
159# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT)
160# else
161# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
162# endif
163# endif
164#else /* !CONFIG_X86_IO_APIC: */
165# define NR_IRQS NR_IRQS_LEGACY
130#endif 166#endif
131 167
132/* Voyager specific defines */
133/* These define the CPIs we use in linux */
134#define VIC_CPI_LEVEL0 0
135#define VIC_CPI_LEVEL1 1
136/* now the fake CPIs */
137#define VIC_TIMER_CPI 2
138#define VIC_INVALIDATE_CPI 3
139#define VIC_RESCHEDULE_CPI 4
140#define VIC_ENABLE_IRQ_CPI 5
141#define VIC_CALL_FUNCTION_CPI 6
142#define VIC_CALL_FUNCTION_SINGLE_CPI 7
143
144/* Now the QIC CPIs: Since we don't need the two initial levels,
145 * these are 2 less than the VIC CPIs */
146#define QIC_CPI_OFFSET 1
147#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
148#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
149#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
150#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
151#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
152#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
153
154#define VIC_START_FAKE_CPI VIC_TIMER_CPI
155#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
156
157/* this is the SYS_INT CPI. */
158#define VIC_SYS_INT 8
159#define VIC_CMN_INT 15
160
161/* This is the boot CPI for alternate processors. It gets overwritten
162 * by the above once the system has activated all available processors */
163#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
164#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
165
166
167#endif /* _ASM_X86_IRQ_VECTORS_H */ 168#endif /* _ASM_X86_IRQ_VECTORS_H */
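
A worked example of the new NR_IRQS sizing in the sparse-IRQ case: take NR_VECTORS plus the larger of the CPU and IO-APIC vector limits. The config values below are illustrative, not a real .config:

#include <stdio.h>

#define NR_VECTORS            256
#define NR_CPUS               64        /* hypothetical config */
#define MAX_IO_APICS          64

#define CPU_VECTOR_LIMIT      (8 * NR_CPUS)
#define IO_APIC_VECTOR_LIMIT  (32 * MAX_IO_APICS)

#define NR_IRQS \
        (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ?      \
                (NR_VECTORS + CPU_VECTOR_LIMIT) :       \
                (NR_VECTORS + IO_APIC_VECTOR_LIMIT))

int main(void)
{
        /* 8*64 = 512 < 32*64 = 2048, so NR_IRQS = 256 + 2048 = 2304 */
        printf("NR_IRQS = %d\n", NR_IRQS);
        return 0;
}
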
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index c61d8b2ab8b9..0ceb6d19ed30 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -9,23 +9,8 @@
9# define PAGES_NR 4 9# define PAGES_NR 4
10#else 10#else
11# define PA_CONTROL_PAGE 0 11# define PA_CONTROL_PAGE 0
12# define VA_CONTROL_PAGE 1 12# define PA_TABLE_PAGE 1
13# define PA_PGD 2 13# define PAGES_NR 2
14# define VA_PGD 3
15# define PA_PUD_0 4
16# define VA_PUD_0 5
17# define PA_PMD_0 6
18# define VA_PMD_0 7
19# define PA_PTE_0 8
20# define VA_PTE_0 9
21# define PA_PUD_1 10
22# define VA_PUD_1 11
23# define PA_PMD_1 12
24# define VA_PMD_1 13
25# define PA_PTE_1 14
26# define VA_PTE_1 15
27# define PA_TABLE_PAGE 16
28# define PAGES_NR 17
29#endif 14#endif
30 15
31#ifdef CONFIG_X86_32 16#ifdef CONFIG_X86_32
@@ -157,9 +142,9 @@ relocate_kernel(unsigned long indirection_page,
157 unsigned long start_address) ATTRIB_NORET; 142 unsigned long start_address) ATTRIB_NORET;
158#endif 143#endif
159 144
160#ifdef CONFIG_X86_32
161#define ARCH_HAS_KIMAGE_ARCH 145#define ARCH_HAS_KIMAGE_ARCH
162 146
147#ifdef CONFIG_X86_32
163struct kimage_arch { 148struct kimage_arch {
164 pgd_t *pgd; 149 pgd_t *pgd;
165#ifdef CONFIG_X86_PAE 150#ifdef CONFIG_X86_PAE
@@ -169,6 +154,12 @@ struct kimage_arch {
169 pte_t *pte0; 154 pte_t *pte0;
170 pte_t *pte1; 155 pte_t *pte1;
171}; 156};
157#else
158struct kimage_arch {
159 pud_t *pud;
160 pmd_t *pmd;
161 pte_t *pte;
162};
172#endif 163#endif
173 164
174#endif /* __ASSEMBLY__ */ 165#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
deleted file mode 100644
index cc09cbbee27e..000000000000
--- a/arch/x86/include/asm/mach-default/mach_apic.h
+++ /dev/null
@@ -1,168 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_APIC_H
2#define _ASM_X86_MACH_DEFAULT_MACH_APIC_H
3
4#ifdef CONFIG_X86_LOCAL_APIC
5
6#include <mach_apicdef.h>
7#include <asm/smp.h>
8
9#define APIC_DFR_VALUE (APIC_DFR_FLAT)
10
11static inline const struct cpumask *target_cpus(void)
12{
13#ifdef CONFIG_SMP
14 return cpu_online_mask;
15#else
16 return cpumask_of(0);
17#endif
18}
19
20#define NO_BALANCE_IRQ (0)
21#define esr_disable (0)
22
23#ifdef CONFIG_X86_64
24#include <asm/genapic.h>
25#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
26#define INT_DEST_MODE (genapic->int_dest_mode)
27#define TARGET_CPUS (genapic->target_cpus())
28#define apic_id_registered (genapic->apic_id_registered)
29#define init_apic_ldr (genapic->init_apic_ldr)
30#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
31#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
32#define phys_pkg_id (genapic->phys_pkg_id)
33#define vector_allocation_domain (genapic->vector_allocation_domain)
34#define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
35#define send_IPI_self (genapic->send_IPI_self)
36#define wakeup_secondary_cpu (genapic->wakeup_cpu)
37extern void setup_apic_routing(void);
38#else
39#define INT_DELIVERY_MODE dest_LowestPrio
40#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
41#define TARGET_CPUS (target_cpus())
42#define wakeup_secondary_cpu wakeup_secondary_cpu_via_init
43/*
44 * Set up the logical destination ID.
45 *
46 * Intel recommends to set DFR, LDR and TPR before enabling
47 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
48 * document number 292116). So here it goes...
49 */
50static inline void init_apic_ldr(void)
51{
52 unsigned long val;
53
54 apic_write(APIC_DFR, APIC_DFR_VALUE);
55 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
56 val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
57 apic_write(APIC_LDR, val);
58}
59
60static inline int apic_id_registered(void)
61{
62 return physid_isset(read_apic_id(), phys_cpu_present_map);
63}
64
65static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
66{
67 return cpumask_bits(cpumask)[0];
68}
69
70static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
71 const struct cpumask *andmask)
72{
73 unsigned long mask1 = cpumask_bits(cpumask)[0];
74 unsigned long mask2 = cpumask_bits(andmask)[0];
75 unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
76
77 return (unsigned int)(mask1 & mask2 & mask3);
78}
79
80static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
81{
82 return cpuid_apic >> index_msb;
83}
84
85static inline void setup_apic_routing(void)
86{
87#ifdef CONFIG_X86_IO_APIC
88 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
89 "Flat", nr_ioapics);
90#endif
91}
92
93static inline int apicid_to_node(int logical_apicid)
94{
95#ifdef CONFIG_SMP
96 return apicid_2_node[hard_smp_processor_id()];
97#else
98 return 0;
99#endif
100}
101
102static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
103{
104 /* Careful. Some cpus do not strictly honor the set of cpus
105 * specified in the interrupt destination when using lowest
106 * priority interrupt delivery mode.
107 *
108 * In particular there was a hyperthreading cpu observed to
109 * deliver interrupts to the wrong hyperthread when only one
110 * hyperthread was specified in the interrupt destination.
111 */
112 *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
113}
114#endif
115
116static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
117{
118 return physid_isset(apicid, bitmap);
119}
120
121static inline unsigned long check_apicid_present(int bit)
122{
123 return physid_isset(bit, phys_cpu_present_map);
124}
125
126static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
127{
128 return phys_map;
129}
130
131static inline int multi_timer_check(int apic, int irq)
132{
133 return 0;
134}
135
136/* Mapping from cpu number to logical apicid */
137static inline int cpu_to_logical_apicid(int cpu)
138{
139 return 1 << cpu;
140}
141
142static inline int cpu_present_to_apicid(int mps_cpu)
143{
144 if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
145 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
146 else
147 return BAD_APICID;
148}
149
150static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
151{
152 return physid_mask_of_physid(phys_apicid);
153}
154
155static inline void setup_portio_remap(void)
156{
157}
158
159static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
160{
161 return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
162}
163
164static inline void enable_apic_mode(void)
165{
166}
167#endif /* CONFIG_X86_LOCAL_APIC */
168#endif /* _ASM_X86_MACH_DEFAULT_MACH_APIC_H */
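
For orientation on the flat mode this file implemented: each CPU's logical APIC ID is a single bit (cpu_to_logical_apicid() returns 1 << cpu), so a logical destination is just the OR of the target CPUs' bits, which is why cpu_mask_to_apicid() can truncate the cpumask to its first word. A minimal user-space sketch of that arithmetic (the CPU numbers are made up):

#include <stdio.h>

/* mirrors the flat-mode cpu_to_logical_apicid() above */
static unsigned int flat_cpu_to_logical_apicid(int cpu)
{
	return 1u << cpu;
}

int main(void)
{
	int cpus[] = { 0, 2, 5 };	/* hypothetical target CPUs */
	unsigned int dest = 0;
	size_t i;

	for (i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
		dest |= flat_cpu_to_logical_apicid(cpus[i]);

	printf("logical destination: 0x%02x\n", dest);	/* prints 0x25 */
	return 0;
}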
diff --git a/arch/x86/include/asm/mach-default/mach_apicdef.h b/arch/x86/include/asm/mach-default/mach_apicdef.h
deleted file mode 100644
index 53179936d6c6..000000000000
--- a/arch/x86/include/asm/mach-default/mach_apicdef.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
2#define _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
3
4#include <asm/apic.h>
5
6#ifdef CONFIG_X86_64
7#define APIC_ID_MASK (genapic->apic_id_mask)
8#define GET_APIC_ID(x) (genapic->get_apic_id(x))
9#define SET_APIC_ID(x) (genapic->set_apic_id(x))
10#else
11#define APIC_ID_MASK (0xF<<24)
12static inline unsigned get_apic_id(unsigned long x)
13{
14 unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
15 if (APIC_XAPIC(ver))
16 return (((x)>>24)&0xFF);
17 else
18 return (((x)>>24)&0xF);
19}
20
21#define GET_APIC_ID(x) get_apic_id(x)
22#endif
23
24#endif /* _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H */
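
The width distinction in the deleted get_apic_id() above: the APIC ID sits in bits 31:24 of the APIC_ID register, with 8 valid bits on integrated xAPICs but only 4 on the old external 82489DX. A stand-alone sketch of the masking, with a hypothetical register value:

#include <stdio.h>

static unsigned get_apic_id(unsigned long x, int is_xapic)
{
	/* ID field is bits 31:24; the mask width depends on APIC flavour */
	return is_xapic ? ((x >> 24) & 0xFF) : ((x >> 24) & 0xF);
}

int main(void)
{
	unsigned long reg = 0x17000000UL;	/* hypothetical APIC_ID value */

	printf("xAPIC id:   %u\n", get_apic_id(reg, 1));	/* 23 */
	printf("82489DX id: %u\n", get_apic_id(reg, 0));	/* 7 */
	return 0;
}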
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h
deleted file mode 100644
index 191312d155da..000000000000
--- a/arch/x86/include/asm/mach-default/mach_ipi.h
+++ /dev/null
@@ -1,64 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_IPI_H
2#define _ASM_X86_MACH_DEFAULT_MACH_IPI_H
3
4/* Avoid include hell */
5#define NMI_VECTOR 0x02
6
7void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
8void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
9void __send_IPI_shortcut(unsigned int shortcut, int vector);
10
11extern int no_broadcast;
12
13#ifdef CONFIG_X86_64
14#include <asm/genapic.h>
15#define send_IPI_mask (genapic->send_IPI_mask)
16#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
17#else
18static inline void send_IPI_mask(const struct cpumask *mask, int vector)
19{
20 send_IPI_mask_bitmask(mask, vector);
21}
22void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
23#endif
24
25static inline void __local_send_IPI_allbutself(int vector)
26{
27 if (no_broadcast || vector == NMI_VECTOR)
28 send_IPI_mask_allbutself(cpu_online_mask, vector);
29 else
30 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
31}
32
33static inline void __local_send_IPI_all(int vector)
34{
35 if (no_broadcast || vector == NMI_VECTOR)
36 send_IPI_mask(cpu_online_mask, vector);
37 else
38 __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
39}
40
41#ifdef CONFIG_X86_64
42#define send_IPI_allbutself (genapic->send_IPI_allbutself)
43#define send_IPI_all (genapic->send_IPI_all)
44#else
45static inline void send_IPI_allbutself(int vector)
46{
47 /*
48 * If there are no other CPUs in the system then a broadcast would
49 * trigger an APIC send error, so avoid sending IPIs in this case.
50 */
51 if (!(num_online_cpus() > 1))
52 return;
53
54 __local_send_IPI_allbutself(vector);
55 return;
56}
57
58static inline void send_IPI_all(int vector)
59{
60 __local_send_IPI_all(vector);
61}
62#endif
63
64#endif /* _ASM_X86_MACH_DEFAULT_MACH_IPI_H */
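
The decision encoded in __local_send_IPI_all()/__local_send_IPI_allbutself() above: the ALLINC/ALLBUT hardware shortcuts reach every APIC on the bus, so they must be bypassed when broadcast is disabled or when delivering an NMI. A compile-and-run sketch of just that predicate:

#include <stdbool.h>
#include <stdio.h>

#define NMI_VECTOR 0x02

/* true when the IPI must be sent per-CPU rather than via a shortcut */
static bool must_send_per_cpu(bool no_broadcast, int vector)
{
	return no_broadcast || vector == NMI_VECTOR;
}

int main(void)
{
	printf("%d\n", must_send_per_cpu(false, 0x20));		/* 0: shortcut ok */
	printf("%d\n", must_send_per_cpu(false, NMI_VECTOR));	/* 1 */
	printf("%d\n", must_send_per_cpu(true, 0x20));		/* 1 */
	return 0;
}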
diff --git a/arch/x86/include/asm/mach-default/mach_mpparse.h b/arch/x86/include/asm/mach-default/mach_mpparse.h
deleted file mode 100644
index c70a263d68cd..000000000000
--- a/arch/x86/include/asm/mach-default/mach_mpparse.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
2#define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
3
4static inline int
5mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
6{
7 return 0;
8}
9
10/* Hook from generic ACPI tables.c */
11static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
12{
13 return 0;
14}
15
16
17#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-default/mach_mpspec.h b/arch/x86/include/asm/mach-default/mach_mpspec.h
deleted file mode 100644
index e85ede686be8..000000000000
--- a/arch/x86/include/asm/mach-default/mach_mpspec.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
2#define _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6#if CONFIG_BASE_SMALL == 0
7#define MAX_MP_BUSSES 256
8#else
9#define MAX_MP_BUSSES 32
10#endif
11
12#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h
deleted file mode 100644
index 89897a6a65b9..000000000000
--- a/arch/x86/include/asm/mach-default/mach_wakecpu.h
+++ /dev/null
@@ -1,41 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
2#define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
3
4#define TRAMPOLINE_PHYS_LOW (0x467)
5#define TRAMPOLINE_PHYS_HIGH (0x469)
6
7static inline void wait_for_init_deassert(atomic_t *deassert)
8{
9 while (!atomic_read(deassert))
10 cpu_relax();
11 return;
12}
13
14/* Nothing to do for most platforms, since cleared by the INIT cycle */
15static inline void smp_callin_clear_local_apic(void)
16{
17}
18
19static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
20{
21}
22
23static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
24{
25}
26
27#ifdef CONFIG_SMP
28extern void __inquire_remote_apic(int apicid);
29#else /* CONFIG_SMP */
30static inline void __inquire_remote_apic(int apicid)
31{
32}
33#endif /* CONFIG_SMP */
34
35static inline void inquire_remote_apic(int apicid)
36{
37 if (apic_verbosity >= APIC_DEBUG)
38 __inquire_remote_apic(apicid);
39}
40
41#endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */
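
wait_for_init_deassert() above is nothing more than a spin on an atomic flag that the boot CPU raises once INIT has been deasserted. A user-space model of the same handshake with C11 atomics (single-threaded here, just to show the flow):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int init_deasserted;

static void wait_for_init_deassert(atomic_int *deassert)
{
	while (!atomic_load(deassert))
		;	/* the kernel inserts cpu_relax() here */
}

int main(void)
{
	atomic_store(&init_deasserted, 1);		/* boot CPU side */
	wait_for_init_deassert(&init_deasserted);	/* secondary side */
	puts("INIT deassert observed");
	return 0;
}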
diff --git a/arch/x86/include/asm/mach-generic/gpio.h b/arch/x86/include/asm/mach-generic/gpio.h
deleted file mode 100644
index 995c45efdb33..000000000000
--- a/arch/x86/include/asm/mach-generic/gpio.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_GPIO_H
2#define _ASM_X86_MACH_GENERIC_GPIO_H
3
4int gpio_request(unsigned gpio, const char *label);
5void gpio_free(unsigned gpio);
6int gpio_direction_input(unsigned gpio);
7int gpio_direction_output(unsigned gpio, int value);
8int gpio_get_value(unsigned gpio);
9void gpio_set_value(unsigned gpio, int value);
10int gpio_to_irq(unsigned gpio);
11int irq_to_gpio(unsigned irq);
12
13#include <asm-generic/gpio.h> /* cansleep wrappers */
14
15#endif /* _ASM_X86_MACH_GENERIC_GPIO_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h
deleted file mode 100644
index 48553e958ad5..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_apic.h
+++ /dev/null
@@ -1,35 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_APIC_H
2#define _ASM_X86_MACH_GENERIC_MACH_APIC_H
3
4#include <asm/genapic.h>
5
6#define esr_disable (genapic->ESR_DISABLE)
7#define NO_BALANCE_IRQ (genapic->no_balance_irq)
8#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
9#define INT_DEST_MODE (genapic->int_dest_mode)
10#undef APIC_DEST_LOGICAL
11#define APIC_DEST_LOGICAL (genapic->apic_destination_logical)
12#define TARGET_CPUS (genapic->target_cpus())
13#define apic_id_registered (genapic->apic_id_registered)
14#define init_apic_ldr (genapic->init_apic_ldr)
15#define ioapic_phys_id_map (genapic->ioapic_phys_id_map)
16#define setup_apic_routing (genapic->setup_apic_routing)
17#define multi_timer_check (genapic->multi_timer_check)
18#define apicid_to_node (genapic->apicid_to_node)
19#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid)
20#define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
21#define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
22#define setup_portio_remap (genapic->setup_portio_remap)
23#define check_apicid_present (genapic->check_apicid_present)
24#define check_phys_apicid_present (genapic->check_phys_apicid_present)
25#define check_apicid_used (genapic->check_apicid_used)
26#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
27#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
28#define vector_allocation_domain (genapic->vector_allocation_domain)
29#define enable_apic_mode (genapic->enable_apic_mode)
30#define phys_pkg_id (genapic->phys_pkg_id)
31#define wakeup_secondary_cpu (genapic->wakeup_cpu)
32
33extern void generic_bigsmp_probe(void);
34
35#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apicdef.h b/arch/x86/include/asm/mach-generic/mach_apicdef.h
deleted file mode 100644
index 68041f3802f4..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_apicdef.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
2#define _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
3
4#ifndef APIC_DEFINITION
5#include <asm/genapic.h>
6
7#define GET_APIC_ID (genapic->get_apic_id)
8#define APIC_ID_MASK (genapic->apic_id_mask)
9#endif
10
11#endif /* _ASM_X86_MACH_GENERIC_MACH_APICDEF_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_ipi.h b/arch/x86/include/asm/mach-generic/mach_ipi.h
deleted file mode 100644
index ffd637e3c3d9..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_ipi.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_IPI_H
2#define _ASM_X86_MACH_GENERIC_MACH_IPI_H
3
4#include <asm/genapic.h>
5
6#define send_IPI_mask (genapic->send_IPI_mask)
7#define send_IPI_allbutself (genapic->send_IPI_allbutself)
8#define send_IPI_all (genapic->send_IPI_all)
9
10#endif /* _ASM_X86_MACH_GENERIC_MACH_IPI_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpparse.h b/arch/x86/include/asm/mach-generic/mach_mpparse.h
deleted file mode 100644
index 9444ab8dca94..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_mpparse.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
2#define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
3
4
5extern int mps_oem_check(struct mpc_table *, char *, char *);
6
7extern int acpi_madt_oem_check(char *, char *);
8
9#endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpspec.h b/arch/x86/include/asm/mach-generic/mach_mpspec.h
deleted file mode 100644
index 3bc407226578..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_mpspec.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
2#define _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6/* Summit or generic (i.e. installer) kernels need lots of bus entries. */
7/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
8#define MAX_MP_BUSSES 260
9
10extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
11
12#endif /* _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h
deleted file mode 100644
index 1ab16b168c8a..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_wakecpu.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
2#define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
3
4#define TRAMPOLINE_PHYS_LOW (genapic->trampoline_phys_low)
5#define TRAMPOLINE_PHYS_HIGH (genapic->trampoline_phys_high)
6#define wait_for_init_deassert (genapic->wait_for_init_deassert)
7#define smp_callin_clear_local_apic (genapic->smp_callin_clear_local_apic)
8#define store_NMI_vector (genapic->store_NMI_vector)
9#define restore_NMI_vector (genapic->restore_NMI_vector)
10#define inquire_remote_apic (genapic->inquire_remote_apic)
11
12#endif /* _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/mach-rdc321x/gpio.h b/arch/x86/include/asm/mach-rdc321x/gpio.h
deleted file mode 100644
index c210ab5788b0..000000000000
--- a/arch/x86/include/asm/mach-rdc321x/gpio.h
+++ /dev/null
@@ -1,60 +0,0 @@
1#ifndef _ASM_X86_MACH_RDC321X_GPIO_H
2#define _ASM_X86_MACH_RDC321X_GPIO_H
3
4#include <linux/kernel.h>
5
6extern int rdc_gpio_get_value(unsigned gpio);
7extern void rdc_gpio_set_value(unsigned gpio, int value);
8extern int rdc_gpio_direction_input(unsigned gpio);
9extern int rdc_gpio_direction_output(unsigned gpio, int value);
10extern int rdc_gpio_request(unsigned gpio, const char *label);
11extern void rdc_gpio_free(unsigned gpio);
12extern void __init rdc321x_gpio_setup(void);
13
14/* Wrappers for the arch-neutral GPIO API */
15
16static inline int gpio_request(unsigned gpio, const char *label)
17{
18 return rdc_gpio_request(gpio, label);
19}
20
21static inline void gpio_free(unsigned gpio)
22{
23 might_sleep();
24 rdc_gpio_free(gpio);
25}
26
27static inline int gpio_direction_input(unsigned gpio)
28{
29 return rdc_gpio_direction_input(gpio);
30}
31
32static inline int gpio_direction_output(unsigned gpio, int value)
33{
34 return rdc_gpio_direction_output(gpio, value);
35}
36
37static inline int gpio_get_value(unsigned gpio)
38{
39 return rdc_gpio_get_value(gpio);
40}
41
42static inline void gpio_set_value(unsigned gpio, int value)
43{
44 rdc_gpio_set_value(gpio, value);
45}
46
47static inline int gpio_to_irq(unsigned gpio)
48{
49 return gpio;
50}
51
52static inline int irq_to_gpio(unsigned irq)
53{
54 return irq;
55}
56
57/* For cansleep */
58#include <asm-generic/gpio.h>
59
60#endif /* _ASM_X86_MACH_RDC321X_GPIO_H */
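
These wrappers exist so drivers can use the arch-neutral gpio_* API unchanged on RDC321x boards. A hypothetical driver fragment written against exactly the calls above (the GPIO number and label are invented, and this is a fragment that compiles against the header, not a real in-tree driver):

static int example_led_init(void)
{
	int err;

	err = gpio_request(5, "example-led");
	if (err)
		return err;

	err = gpio_direction_output(5, 1);	/* drive the line high */
	if (err) {
		gpio_free(5);
		return err;
	}

	gpio_set_value(5, 0);			/* and low again */
	return 0;
}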
diff --git a/arch/x86/include/asm/mach-default/mach_timer.h b/arch/x86/include/asm/mach_timer.h
index 853728519ae9..853728519ae9 100644
--- a/arch/x86/include/asm/mach-default/mach_timer.h
+++ b/arch/x86/include/asm/mach_timer.h
diff --git a/arch/x86/include/asm/mach-default/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index f7920601e472..f7920601e472 100644
--- a/arch/x86/include/asm/mach-default/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 8aeeb3fd73db..f923203dc39a 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -21,11 +21,54 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
21int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 21int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
22void destroy_context(struct mm_struct *mm); 22void destroy_context(struct mm_struct *mm);
23 23
24#ifdef CONFIG_X86_32 24
25# include "mmu_context_32.h" 25static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
26#else 26{
27# include "mmu_context_64.h" 27#ifdef CONFIG_SMP
28 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
29 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
30#endif
31}
32
33static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
34 struct task_struct *tsk)
35{
36 unsigned cpu = smp_processor_id();
37
38 if (likely(prev != next)) {
39 /* stop flush ipis for the previous mm */
40 cpu_clear(cpu, prev->cpu_vm_mask);
41#ifdef CONFIG_SMP
42 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
43 percpu_write(cpu_tlbstate.active_mm, next);
28#endif 44#endif
45 cpu_set(cpu, next->cpu_vm_mask);
46
47 /* Re-load page tables */
48 load_cr3(next->pgd);
49
50 /*
51 * load the LDT, if the LDT is different:
52 */
53 if (unlikely(prev->context.ldt != next->context.ldt))
54 load_LDT_nolock(&next->context);
55 }
56#ifdef CONFIG_SMP
57 else {
58 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
59 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
60
61 if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
62 /* We were in lazy tlb mode and leave_mm disabled
63 * tlb flush IPI delivery. We must reload CR3
64 * to make sure we don't use freed page tables.
65 */
66 load_cr3(next->pgd);
67 load_LDT_nolock(&next->context);
68 }
69 }
70#endif
71}
29 72
30#define activate_mm(prev, next) \ 73#define activate_mm(prev, next) \
31do { \ 74do { \
@@ -33,5 +76,17 @@ do { \
33 switch_mm((prev), (next), NULL); \ 76 switch_mm((prev), (next), NULL); \
34} while (0); 77} while (0);
35 78
79#ifdef CONFIG_X86_32
80#define deactivate_mm(tsk, mm) \
81do { \
82 lazy_load_gs(0); \
83} while (0)
84#else
85#define deactivate_mm(tsk, mm) \
86do { \
87 load_gs_index(0); \
88 loadsegment(fs, 0); \
89} while (0)
90#endif
36 91
37#endif /* _ASM_X86_MMU_CONTEXT_H */ 92#endif /* _ASM_X86_MMU_CONTEXT_H */
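
The unified switch_mm()/enter_lazy_tlb() above implement a small state machine: a CPU that enters TLBSTATE_LAZY opts out of TLB flush IPIs, so if it later resumes the same mm it must assume it missed flushes and reload CR3 itself. A deliberately simplified single-CPU model of that bookkeeping (the real code gates the reload on cpu_vm_mask, elided here):

#include <stdio.h>

enum tlbstate { TLBSTATE_OK = 1, TLBSTATE_LAZY = 2 };

struct cpu_tlbstate { enum tlbstate state; int active_mm; };

static void enter_lazy_tlb(struct cpu_tlbstate *ts)
{
	if (ts->state == TLBSTATE_OK)
		ts->state = TLBSTATE_LAZY;	/* stop taking flush IPIs */
}

static void switch_mm(struct cpu_tlbstate *ts, int prev_mm, int next_mm)
{
	if (prev_mm != next_mm) {
		ts->state = TLBSTATE_OK;
		ts->active_mm = next_mm;
		printf("load_cr3(mm %d)\n", next_mm);
	} else if (ts->state == TLBSTATE_LAZY) {
		/* may have missed flush IPIs while lazy: reload */
		ts->state = TLBSTATE_OK;
		printf("load_cr3(mm %d) after lazy period\n", next_mm);
	}
}

int main(void)
{
	struct cpu_tlbstate ts = { TLBSTATE_OK, 1 };

	enter_lazy_tlb(&ts);	/* going idle */
	switch_mm(&ts, 1, 1);	/* back to the same mm: reload */
	switch_mm(&ts, 1, 2);	/* a different mm: normal switch */
	return 0;
}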
diff --git a/arch/x86/include/asm/mmu_context_32.h b/arch/x86/include/asm/mmu_context_32.h
deleted file mode 100644
index 7e98ce1d2c0e..000000000000
--- a/arch/x86/include/asm/mmu_context_32.h
+++ /dev/null
@@ -1,55 +0,0 @@
1#ifndef _ASM_X86_MMU_CONTEXT_32_H
2#define _ASM_X86_MMU_CONTEXT_32_H
3
4static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
5{
6#ifdef CONFIG_SMP
7 if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK)
8 x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
9#endif
10}
11
12static inline void switch_mm(struct mm_struct *prev,
13 struct mm_struct *next,
14 struct task_struct *tsk)
15{
16 int cpu = smp_processor_id();
17
18 if (likely(prev != next)) {
19 /* stop flush ipis for the previous mm */
20 cpu_clear(cpu, prev->cpu_vm_mask);
21#ifdef CONFIG_SMP
22 x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
23 x86_write_percpu(cpu_tlbstate.active_mm, next);
24#endif
25 cpu_set(cpu, next->cpu_vm_mask);
26
27 /* Re-load page tables */
28 load_cr3(next->pgd);
29
30 /*
31 * load the LDT, if the LDT is different:
32 */
33 if (unlikely(prev->context.ldt != next->context.ldt))
34 load_LDT_nolock(&next->context);
35 }
36#ifdef CONFIG_SMP
37 else {
38 x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
39 BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);
40
41 if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
42 /* We were in lazy tlb mode and leave_mm disabled
43 * tlb flush IPI delivery. We must reload %cr3.
44 */
45 load_cr3(next->pgd);
46 load_LDT_nolock(&next->context);
47 }
48 }
49#endif
50}
51
52#define deactivate_mm(tsk, mm) \
53 asm("movl %0,%%gs": :"r" (0));
54
55#endif /* _ASM_X86_MMU_CONTEXT_32_H */
diff --git a/arch/x86/include/asm/mmu_context_64.h b/arch/x86/include/asm/mmu_context_64.h
deleted file mode 100644
index 677d36e9540a..000000000000
--- a/arch/x86/include/asm/mmu_context_64.h
+++ /dev/null
@@ -1,54 +0,0 @@
1#ifndef _ASM_X86_MMU_CONTEXT_64_H
2#define _ASM_X86_MMU_CONTEXT_64_H
3
4#include <asm/pda.h>
5
6static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7{
8#ifdef CONFIG_SMP
9 if (read_pda(mmu_state) == TLBSTATE_OK)
10 write_pda(mmu_state, TLBSTATE_LAZY);
11#endif
12}
13
14static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15 struct task_struct *tsk)
16{
17 unsigned cpu = smp_processor_id();
18 if (likely(prev != next)) {
19 /* stop flush ipis for the previous mm */
20 cpu_clear(cpu, prev->cpu_vm_mask);
21#ifdef CONFIG_SMP
22 write_pda(mmu_state, TLBSTATE_OK);
23 write_pda(active_mm, next);
24#endif
25 cpu_set(cpu, next->cpu_vm_mask);
26 load_cr3(next->pgd);
27
28 if (unlikely(next->context.ldt != prev->context.ldt))
29 load_LDT_nolock(&next->context);
30 }
31#ifdef CONFIG_SMP
32 else {
33 write_pda(mmu_state, TLBSTATE_OK);
34 if (read_pda(active_mm) != next)
35 BUG();
36 if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
37 /* We were in lazy tlb mode and leave_mm disabled
38 * tlb flush IPI delivery. We must reload CR3
39 * to make sure we don't use freed page tables.
40 */
41 load_cr3(next->pgd);
42 load_LDT_nolock(&next->context);
43 }
44 }
45#endif
46}
47
48#define deactivate_mm(tsk, mm) \
49do { \
50 load_gs_index(0); \
51 asm volatile("movl %0,%%fs"::"r"(0)); \
52} while (0)
53
54#endif /* _ASM_X86_MMU_CONTEXT_64_H */
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index bd22f2a3713f..5916c8df09d9 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -9,7 +9,18 @@ extern int apic_version[MAX_APICS];
9extern int pic_mode; 9extern int pic_mode;
10 10
11#ifdef CONFIG_X86_32 11#ifdef CONFIG_X86_32
12#include <mach_mpspec.h> 12
13/*
14 * Summit or generic (i.e. installer) kernels need lots of bus entries.
15 * Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets.
16 */
17#if CONFIG_BASE_SMALL == 0
18# define MAX_MP_BUSSES 260
19#else
20# define MAX_MP_BUSSES 32
21#endif
22
23#define MAX_IRQ_SOURCES 256
13 24
14extern unsigned int def_to_bigsmp; 25extern unsigned int def_to_bigsmp;
15extern u8 apicid_2_node[]; 26extern u8 apicid_2_node[];
@@ -20,15 +31,15 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
20extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; 31extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
21#endif 32#endif
22 33
23#define MAX_APICID 256 34#define MAX_APICID 256
24 35
25#else 36#else /* CONFIG_X86_64: */
26 37
27#define MAX_MP_BUSSES 256 38#define MAX_MP_BUSSES 256
28/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ 39/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
29#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) 40#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
30 41
31#endif 42#endif /* CONFIG_X86_64 */
32 43
33extern void early_find_smp_config(void); 44extern void early_find_smp_config(void);
34extern void early_get_smp_config(void); 45extern void early_get_smp_config(void);
@@ -45,11 +56,13 @@ extern int smp_found_config;
45extern int mpc_default_type; 56extern int mpc_default_type;
46extern unsigned long mp_lapic_addr; 57extern unsigned long mp_lapic_addr;
47 58
48extern void find_smp_config(void);
49extern void get_smp_config(void); 59extern void get_smp_config(void);
60
50#ifdef CONFIG_X86_MPPARSE 61#ifdef CONFIG_X86_MPPARSE
62extern void find_smp_config(void);
51extern void early_reserve_e820_mpc_new(void); 63extern void early_reserve_e820_mpc_new(void);
52#else 64#else
65static inline void find_smp_config(void) { }
53static inline void early_reserve_e820_mpc_new(void) { } 66static inline void early_reserve_e820_mpc_new(void) { }
54#endif 67#endif
55 68
@@ -64,6 +77,8 @@ extern int acpi_probe_gsi(void);
64#ifdef CONFIG_X86_IO_APIC 77#ifdef CONFIG_X86_IO_APIC
65extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, 78extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
66 u32 gsi, int triggering, int polarity); 79 u32 gsi, int triggering, int polarity);
80extern int mp_find_ioapic(int gsi);
81extern int mp_find_ioapic_pin(int ioapic, int gsi);
67#else 82#else
68static inline int 83static inline int
69mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, 84mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
@@ -148,4 +163,10 @@ static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
148 163
149extern physid_mask_t phys_cpu_present_map; 164extern physid_mask_t phys_cpu_present_map;
150 165
166extern int generic_mps_oem_check(struct mpc_table *, char *, char *);
167
168extern int default_acpi_madt_oem_check(char *, char *);
169
170extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
171
151#endif /* _ASM_X86_MPSPEC_H */ 172#endif /* _ASM_X86_MPSPEC_H */
diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h
index 59568bc4767f..4a7f96d7c188 100644
--- a/arch/x86/include/asm/mpspec_def.h
+++ b/arch/x86/include/asm/mpspec_def.h
@@ -24,17 +24,18 @@
24# endif 24# endif
25#endif 25#endif
26 26
27struct intel_mp_floating { 27/* Intel MP Floating Pointer Structure */
28 char mpf_signature[4]; /* "_MP_" */ 28struct mpf_intel {
29 unsigned int mpf_physptr; /* Configuration table address */ 29 char signature[4]; /* "_MP_" */
30 unsigned char mpf_length; /* Our length (paragraphs) */ 30 unsigned int physptr; /* Configuration table address */
31 unsigned char mpf_specification;/* Specification version */ 31 unsigned char length; /* Our length (paragraphs) */
32 unsigned char mpf_checksum; /* Checksum (makes sum 0) */ 32 unsigned char specification; /* Specification version */
33 unsigned char mpf_feature1; /* Standard or configuration ? */ 33 unsigned char checksum; /* Checksum (makes sum 0) */
34 unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ 34 unsigned char feature1; /* Standard or configuration ? */
35 unsigned char mpf_feature3; /* Unused (0) */ 35 unsigned char feature2; /* Bit7 set for IMCR|PIC */
36 unsigned char mpf_feature4; /* Unused (0) */ 36 unsigned char feature3; /* Unused (0) */
37 unsigned char mpf_feature5; /* Unused (0) */ 37 unsigned char feature4; /* Unused (0) */
38 unsigned char feature5; /* Unused (0) */
38}; 39};
39 40
40#define MPC_SIGNATURE "PCMP" 41#define MPC_SIGNATURE "PCMP"
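
The "makes sum 0" convention on the checksum field of the renamed struct mpf_intel means all 16 bytes of the structure, checksum included, sum to zero modulo 256. A sketch of how a parser validates (and a writer fixes up) that invariant, using the field offsets implied by the struct above:

#include <stdio.h>

static unsigned char byte_sum(const unsigned char *p, int len)
{
	unsigned char sum = 0;

	while (len--)
		sum += *p++;
	return sum;	/* 0 for a valid structure */
}

int main(void)
{
	unsigned char mpf[16] = { '_', 'M', 'P', '_' };	/* rest zeroed */

	mpf[10] = (unsigned char)-byte_sum(mpf, 16);	/* checksum lives at offset 10 */
	printf("sum = %u\n", byte_sum(mpf, 16));	/* prints 0 */
	return 0;
}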
diff --git a/arch/x86/include/asm/numaq.h b/arch/x86/include/asm/numaq.h
index 1e8bd30b4c16..9f0a5f5d29ec 100644
--- a/arch/x86/include/asm/numaq.h
+++ b/arch/x86/include/asm/numaq.h
@@ -31,6 +31,8 @@
31extern int found_numaq; 31extern int found_numaq;
32extern int get_memcfg_numaq(void); 32extern int get_memcfg_numaq(void);
33 33
34extern void *xquad_portio;
35
34/* 36/*
35 * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the 37 * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the
36 */ 38 */
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
deleted file mode 100644
index bf37bc49bd8e..000000000000
--- a/arch/x86/include/asm/numaq/apic.h
+++ /dev/null
@@ -1,142 +0,0 @@
1#ifndef __ASM_NUMAQ_APIC_H
2#define __ASM_NUMAQ_APIC_H
3
4#include <asm/io.h>
5#include <linux/mmzone.h>
6#include <linux/nodemask.h>
7
8#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
9
10static inline const cpumask_t *target_cpus(void)
11{
12 return &CPU_MASK_ALL;
13}
14
15#define NO_BALANCE_IRQ (1)
16#define esr_disable (1)
17
18#define INT_DELIVERY_MODE dest_LowestPrio
19#define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */
20
21static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
22{
23 return physid_isset(apicid, bitmap);
24}
25static inline unsigned long check_apicid_present(int bit)
26{
27 return physid_isset(bit, phys_cpu_present_map);
28}
29#define apicid_cluster(apicid) (apicid & 0xF0)
30
31static inline int apic_id_registered(void)
32{
33 return 1;
34}
35
36static inline void init_apic_ldr(void)
37{
38 /* Already done in NUMA-Q firmware */
39}
40
41static inline void setup_apic_routing(void)
42{
43 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
44 "NUMA-Q", nr_ioapics);
45}
46
47/*
48 * Skip adding the timer int on secondary nodes, which causes
49 * a small but painful rift in the time-space continuum.
50 */
51static inline int multi_timer_check(int apic, int irq)
52{
53 return apic != 0 && irq == 0;
54}
55
56static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
57{
58 /* We don't have a good way to do this yet - hack */
59 return physids_promote(0xFUL);
60}
61
62/* Mapping from cpu number to logical apicid */
63extern u8 cpu_2_logical_apicid[];
64static inline int cpu_to_logical_apicid(int cpu)
65{
66 if (cpu >= nr_cpu_ids)
67 return BAD_APICID;
68 return (int)cpu_2_logical_apicid[cpu];
69}
70
71/*
72 * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
73 * cpu to APIC ID relation to properly interact with the intelligent
74 * mode of the cluster controller.
75 */
76static inline int cpu_present_to_apicid(int mps_cpu)
77{
78 if (mps_cpu < 60)
79 return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
80 else
81 return BAD_APICID;
82}
83
84static inline int apicid_to_node(int logical_apicid)
85{
86 return logical_apicid >> 4;
87}
88
89static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
90{
91 int node = apicid_to_node(logical_apicid);
92 int cpu = __ffs(logical_apicid & 0xf);
93
94 return physid_mask_of_physid(cpu + 4*node);
95}
96
97extern void *xquad_portio;
98
99static inline void setup_portio_remap(void)
100{
101 int num_quads = num_online_nodes();
102
103 if (num_quads <= 1)
104 return;
105
106 printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
107 xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
108 printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
109 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
110}
111
112static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
113{
114 return (1);
115}
116
117static inline void enable_apic_mode(void)
118{
119}
120
121/*
122 * We use physical apicids here, not logical, so just return the default
123 * physical broadcast to stop people from breaking us
124 */
125static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
126{
127 return (int) 0xF;
128}
129
130static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
131 const struct cpumask *andmask)
132{
133 return (int) 0xF;
134}
135
136/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
137static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
138{
139 return cpuid_apic >> index_msb;
140}
141
142#endif /* __ASM_NUMAQ_APIC_H */
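
The cluster encoding that made the file above necessary: cpu_present_to_apicid() packs the quad number into the high nibble of the APIC ID and a one-hot CPU-within-quad bit into the low nibble, which is also what apicid_to_node() and apicid_to_cpu_present() invert. A worked example:

#include <stdio.h>

static int numaq_cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < 60)
		return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
	return -1;	/* BAD_APICID */
}

int main(void)
{
	/* cpu 6 = quad 1, local cpu 2 -> 0x10 | 0x04 = 0x14 */
	printf("cpu 6 -> apicid 0x%02x\n", numaq_cpu_present_to_apicid(6));
	/* cpu 0 = quad 0, local cpu 0 -> 0x01 */
	printf("cpu 0 -> apicid 0x%02x\n", numaq_cpu_present_to_apicid(0));
	return 0;
}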
diff --git a/arch/x86/include/asm/numaq/apicdef.h b/arch/x86/include/asm/numaq/apicdef.h
deleted file mode 100644
index e012a46cc22a..000000000000
--- a/arch/x86/include/asm/numaq/apicdef.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __ASM_NUMAQ_APICDEF_H
2#define __ASM_NUMAQ_APICDEF_H
3
4
5#define APIC_ID_MASK (0xF<<24)
6
7static inline unsigned get_apic_id(unsigned long x)
8{
9 return (((x)>>24)&0x0F);
10}
11
12#define GET_APIC_ID(x) get_apic_id(x)
13
14#endif
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h
deleted file mode 100644
index a8374c652778..000000000000
--- a/arch/x86/include/asm/numaq/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef __ASM_NUMAQ_IPI_H
2#define __ASM_NUMAQ_IPI_H
3
4void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
5void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
6
7static inline void send_IPI_mask(const struct cpumask *mask, int vector)
8{
9 send_IPI_mask_sequence(mask, vector);
10}
11
12static inline void send_IPI_allbutself(int vector)
13{
14 send_IPI_mask_allbutself(cpu_online_mask, vector);
15}
16
17static inline void send_IPI_all(int vector)
18{
19 send_IPI_mask(cpu_online_mask, vector);
20}
21
22#endif /* __ASM_NUMAQ_IPI_H */
diff --git a/arch/x86/include/asm/numaq/mpparse.h b/arch/x86/include/asm/numaq/mpparse.h
deleted file mode 100644
index a2eeefcd1cc7..000000000000
--- a/arch/x86/include/asm/numaq/mpparse.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_NUMAQ_MPPARSE_H
2#define __ASM_NUMAQ_MPPARSE_H
3
4extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
5
6#endif /* __ASM_NUMAQ_MPPARSE_H */
diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h
deleted file mode 100644
index 6f499df8eddb..000000000000
--- a/arch/x86/include/asm/numaq/wakecpu.h
+++ /dev/null
@@ -1,45 +0,0 @@
1#ifndef __ASM_NUMAQ_WAKECPU_H
2#define __ASM_NUMAQ_WAKECPU_H
3
4/* This file copes with machines that wake up secondary CPUs by NMIs */
5
6#define TRAMPOLINE_PHYS_LOW (0x8)
7#define TRAMPOLINE_PHYS_HIGH (0xa)
8
9/* We don't do anything here because we use NMIs to boot instead */
10static inline void wait_for_init_deassert(atomic_t *deassert)
11{
12}
13
14/*
15 * Because we use NMIs rather than the INIT-STARTUP sequence to
16 * bootstrap the CPUs, the APIC may be in a weird state. Kick it.
17 */
18static inline void smp_callin_clear_local_apic(void)
19{
20 clear_local_APIC();
21}
22
23static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
24{
25 printk("Storing NMI vector\n");
26 *high =
27 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH));
28 *low =
29 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW));
30}
31
32static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
33{
34 printk("Restoring NMI vector\n");
35 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
36 *high;
37 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
38 *low;
39}
40
41static inline void inquire_remote_apic(int apicid)
42{
43}
44
45#endif /* __ASM_NUMAQ_WAKECPU_H */
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index e9873a2e8695..40226999cbf8 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -95,6 +95,11 @@ static inline pgdval_t native_pgd_val(pgd_t pgd)
95 return pgd.pgd; 95 return pgd.pgd;
96} 96}
97 97
98static inline pgdval_t pgd_flags(pgd_t pgd)
99{
100 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
101}
102
98#if PAGETABLE_LEVELS >= 3 103#if PAGETABLE_LEVELS >= 3
99#if PAGETABLE_LEVELS == 4 104#if PAGETABLE_LEVELS == 4
100typedef struct { pudval_t pud; } pud_t; 105typedef struct { pudval_t pud; } pud_t;
@@ -117,6 +122,11 @@ static inline pudval_t native_pud_val(pud_t pud)
117} 122}
118#endif /* PAGETABLE_LEVELS == 4 */ 123#endif /* PAGETABLE_LEVELS == 4 */
119 124
125static inline pudval_t pud_flags(pud_t pud)
126{
127 return native_pud_val(pud) & PTE_FLAGS_MASK;
128}
129
120typedef struct { pmdval_t pmd; } pmd_t; 130typedef struct { pmdval_t pmd; } pmd_t;
121 131
122static inline pmd_t native_make_pmd(pmdval_t val) 132static inline pmd_t native_make_pmd(pmdval_t val)
@@ -128,6 +138,7 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
128{ 138{
129 return pmd.pmd; 139 return pmd.pmd;
130} 140}
141
131#else /* PAGETABLE_LEVELS == 2 */ 142#else /* PAGETABLE_LEVELS == 2 */
132#include <asm-generic/pgtable-nopmd.h> 143#include <asm-generic/pgtable-nopmd.h>
133 144
@@ -137,6 +148,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
137} 148}
138#endif /* PAGETABLE_LEVELS >= 3 */ 149#endif /* PAGETABLE_LEVELS >= 3 */
139 150
151static inline pmdval_t pmd_flags(pmd_t pmd)
152{
153 return native_pmd_val(pmd) & PTE_FLAGS_MASK;
154}
155
140static inline pte_t native_make_pte(pteval_t val) 156static inline pte_t native_make_pte(pteval_t val)
141{ 157{
142 return (pte_t) { .pte = val }; 158 return (pte_t) { .pte = val };
@@ -147,7 +163,7 @@ static inline pteval_t native_pte_val(pte_t pte)
147 return pte.pte; 163 return pte.pte;
148} 164}
149 165
150static inline pteval_t native_pte_flags(pte_t pte) 166static inline pteval_t pte_flags(pte_t pte)
151{ 167{
152 return native_pte_val(pte) & PTE_FLAGS_MASK; 168 return native_pte_val(pte) & PTE_FLAGS_MASK;
153} 169}
@@ -173,7 +189,6 @@ static inline pteval_t native_pte_flags(pte_t pte)
173#endif 189#endif
174 190
175#define pte_val(x) native_pte_val(x) 191#define pte_val(x) native_pte_val(x)
176#define pte_flags(x) native_pte_flags(x)
177#define __pte(x) native_make_pte(x) 192#define __pte(x) native_make_pte(x)
178 193
179#endif /* CONFIG_PARAVIRT */ 194#endif /* CONFIG_PARAVIRT */
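
pgd_flags(), pud_flags(), pmd_flags() and the renamed pte_flags() above all apply the same split: PTE_FLAGS_MASK is the complement of the page-frame field. A stand-alone sketch with stand-in mask values (the real PTE_PFN_MASK depends on the configured physical address width):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

#define PAGE_SHIFT	12
/* stand-ins for the kernel's PTE_PFN_MASK / PTE_FLAGS_MASK */
#define PTE_PFN_MASK	0x000ffffffffff000ULL
#define PTE_FLAGS_MASK	(~PTE_PFN_MASK)

static pteval_t pte_flags(pteval_t pte) { return pte & PTE_FLAGS_MASK; }
static uint64_t pte_pfn(pteval_t pte) { return (pte & PTE_PFN_MASK) >> PAGE_SHIFT; }

int main(void)
{
	pteval_t pte = 0x8000000012345067ULL;	/* NX + frame + low flag bits */

	printf("flags = 0x%llx\n", (unsigned long long)pte_flags(pte));
	printf("pfn   = 0x%llx\n", (unsigned long long)pte_pfn(pte));
	return 0;
}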
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 5ebca29f44f0..e27fdbe5f9e4 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -13,8 +13,8 @@
13#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1) 13#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
14#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) 14#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
15 15
16#define IRQSTACK_ORDER 2 16#define IRQ_STACK_ORDER 2
17#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER) 17#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
18 18
19#define STACKFAULT_STACK 1 19#define STACKFAULT_STACK 1
20#define DOUBLEFAULT_STACK 2 20#define DOUBLEFAULT_STACK 2
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index c09a14127584..1c244b64573f 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -12,21 +12,38 @@
12#define CLBR_EAX (1 << 0) 12#define CLBR_EAX (1 << 0)
13#define CLBR_ECX (1 << 1) 13#define CLBR_ECX (1 << 1)
14#define CLBR_EDX (1 << 2) 14#define CLBR_EDX (1 << 2)
15#define CLBR_EDI (1 << 3)
15 16
16#ifdef CONFIG_X86_64 17#ifdef CONFIG_X86_32
17#define CLBR_RSI (1 << 3) 18/* CLBR_ANY should match all the regs the platform has. For i386, that's all of them. */
18#define CLBR_RDI (1 << 4) 19#define CLBR_ANY ((1 << 4) - 1)
20
21#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
22#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
23#define CLBR_SCRATCH (0)
24#else
25#define CLBR_RAX CLBR_EAX
26#define CLBR_RCX CLBR_ECX
27#define CLBR_RDX CLBR_EDX
28#define CLBR_RDI CLBR_EDI
29#define CLBR_RSI (1 << 4)
19#define CLBR_R8 (1 << 5) 30#define CLBR_R8 (1 << 5)
20#define CLBR_R9 (1 << 6) 31#define CLBR_R9 (1 << 6)
21#define CLBR_R10 (1 << 7) 32#define CLBR_R10 (1 << 7)
22#define CLBR_R11 (1 << 8) 33#define CLBR_R11 (1 << 8)
34
23#define CLBR_ANY ((1 << 9) - 1) 35#define CLBR_ANY ((1 << 9) - 1)
36
37#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
38 CLBR_RCX | CLBR_R8 | CLBR_R9)
39#define CLBR_RET_REG (CLBR_RAX)
40#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
41
24#include <asm/desc_defs.h> 42#include <asm/desc_defs.h>
25#else
26/* CLBR_ANY should match all the regs the platform has. For i386, that's all of them. */
27#define CLBR_ANY ((1 << 3) - 1)
28#endif /* X86_64 */ 43#endif /* X86_64 */
29 44
45#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
46
30#ifndef __ASSEMBLY__ 47#ifndef __ASSEMBLY__
31#include <linux/types.h> 48#include <linux/types.h>
32#include <linux/cpumask.h> 49#include <linux/cpumask.h>
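
A quick arithmetic check of the 64-bit clobber masks defined above: CLBR_CALLEE_SAVE works out to 0x1fe, i.e. every register in the mask except RAX, which is exactly the set a register-saving thunk has to preserve.

#include <stdio.h>

#define CLBR_RAX (1 << 0)
#define CLBR_RCX (1 << 1)
#define CLBR_RDX (1 << 2)
#define CLBR_RDI (1 << 3)
#define CLBR_RSI (1 << 4)
#define CLBR_R8  (1 << 5)
#define CLBR_R9  (1 << 6)
#define CLBR_R10 (1 << 7)
#define CLBR_R11 (1 << 8)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)
#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)

int main(void)
{
	printf("CLBR_CALLEE_SAVE = 0x%03x\n", CLBR_CALLEE_SAVE);	/* 0x1fe */
	return 0;
}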
@@ -40,6 +57,14 @@ struct tss_struct;
40struct mm_struct; 57struct mm_struct;
41struct desc_struct; 58struct desc_struct;
42 59
60/*
61 * Wrapper type for pointers to code which uses the non-standard
62 * calling convention. See PV_CALL_SAVE_REGS_THUNK below.
63 */
64struct paravirt_callee_save {
65 void *func;
66};
67
43/* general info */ 68/* general info */
44struct pv_info { 69struct pv_info {
45 unsigned int kernel_rpl; 70 unsigned int kernel_rpl;
@@ -189,11 +214,15 @@ struct pv_irq_ops {
189 * expected to use X86_EFLAGS_IF; all other bits 214 * expected to use X86_EFLAGS_IF; all other bits
190 * returned from save_fl are undefined, and may be ignored by 215 * returned from save_fl are undefined, and may be ignored by
191 * restore_fl. 216 * restore_fl.
217 *
218 * NOTE: These functions' callers expect the callee to preserve
219 * more registers than the standard C calling convention.
192 */ 220 */
193 unsigned long (*save_fl)(void); 221 struct paravirt_callee_save save_fl;
194 void (*restore_fl)(unsigned long); 222 struct paravirt_callee_save restore_fl;
195 void (*irq_disable)(void); 223 struct paravirt_callee_save irq_disable;
196 void (*irq_enable)(void); 224 struct paravirt_callee_save irq_enable;
225
197 void (*safe_halt)(void); 226 void (*safe_halt)(void);
198 void (*halt)(void); 227 void (*halt)(void);
199 228
@@ -244,7 +273,8 @@ struct pv_mmu_ops {
244 void (*flush_tlb_user)(void); 273 void (*flush_tlb_user)(void);
245 void (*flush_tlb_kernel)(void); 274 void (*flush_tlb_kernel)(void);
246 void (*flush_tlb_single)(unsigned long addr); 275 void (*flush_tlb_single)(unsigned long addr);
247 void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm, 276 void (*flush_tlb_others)(const struct cpumask *cpus,
277 struct mm_struct *mm,
248 unsigned long va); 278 unsigned long va);
249 279
250 /* Hooks for allocating and freeing a pagetable top-level */ 280 /* Hooks for allocating and freeing a pagetable top-level */
@@ -278,12 +308,11 @@ struct pv_mmu_ops {
278 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr, 308 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
279 pte_t *ptep, pte_t pte); 309 pte_t *ptep, pte_t pte);
280 310
281 pteval_t (*pte_val)(pte_t); 311 struct paravirt_callee_save pte_val;
282 pteval_t (*pte_flags)(pte_t); 312 struct paravirt_callee_save make_pte;
283 pte_t (*make_pte)(pteval_t pte);
284 313
285 pgdval_t (*pgd_val)(pgd_t); 314 struct paravirt_callee_save pgd_val;
286 pgd_t (*make_pgd)(pgdval_t pgd); 315 struct paravirt_callee_save make_pgd;
287 316
288#if PAGETABLE_LEVELS >= 3 317#if PAGETABLE_LEVELS >= 3
289#ifdef CONFIG_X86_PAE 318#ifdef CONFIG_X86_PAE
@@ -298,12 +327,12 @@ struct pv_mmu_ops {
298 327
299 void (*set_pud)(pud_t *pudp, pud_t pudval); 328 void (*set_pud)(pud_t *pudp, pud_t pudval);
300 329
301 pmdval_t (*pmd_val)(pmd_t); 330 struct paravirt_callee_save pmd_val;
302 pmd_t (*make_pmd)(pmdval_t pmd); 331 struct paravirt_callee_save make_pmd;
303 332
304#if PAGETABLE_LEVELS == 4 333#if PAGETABLE_LEVELS == 4
305 pudval_t (*pud_val)(pud_t); 334 struct paravirt_callee_save pud_val;
306 pud_t (*make_pud)(pudval_t pud); 335 struct paravirt_callee_save make_pud;
307 336
308 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); 337 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
309#endif /* PAGETABLE_LEVELS == 4 */ 338#endif /* PAGETABLE_LEVELS == 4 */
@@ -388,6 +417,8 @@ extern struct pv_lock_ops pv_lock_ops;
388 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":") 417 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
389 418
390unsigned paravirt_patch_nop(void); 419unsigned paravirt_patch_nop(void);
420unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
421unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
391unsigned paravirt_patch_ignore(unsigned len); 422unsigned paravirt_patch_ignore(unsigned len);
392unsigned paravirt_patch_call(void *insnbuf, 423unsigned paravirt_patch_call(void *insnbuf,
393 const void *target, u16 tgt_clobbers, 424 const void *target, u16 tgt_clobbers,
@@ -479,25 +510,45 @@ int paravirt_disable_iospace(void);
479 * makes sure the incoming and outgoing types are always correct. 510 * makes sure the incoming and outgoing types are always correct.
480 */ 511 */
481#ifdef CONFIG_X86_32 512#ifdef CONFIG_X86_32
482#define PVOP_VCALL_ARGS unsigned long __eax, __edx, __ecx 513#define PVOP_VCALL_ARGS \
514 unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
483#define PVOP_CALL_ARGS PVOP_VCALL_ARGS 515#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
516
517#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
518#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
519#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
520
484#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \ 521#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
485 "=c" (__ecx) 522 "=c" (__ecx)
486#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS 523#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
524
525#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
526#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
527
487#define EXTRA_CLOBBERS 528#define EXTRA_CLOBBERS
488#define VEXTRA_CLOBBERS 529#define VEXTRA_CLOBBERS
489#else 530#else /* CONFIG_X86_64 */
490#define PVOP_VCALL_ARGS unsigned long __edi, __esi, __edx, __ecx 531#define PVOP_VCALL_ARGS \
532 unsigned long __edi = __edi, __esi = __esi, \
533 __edx = __edx, __ecx = __ecx
491#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax 534#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
535
536#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
537#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
538#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
539#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
540
492#define PVOP_VCALL_CLOBBERS "=D" (__edi), \ 541#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
493 "=S" (__esi), "=d" (__edx), \ 542 "=S" (__esi), "=d" (__edx), \
494 "=c" (__ecx) 543 "=c" (__ecx)
495
496#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) 544#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
497 545
546#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
547#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
548
498#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11" 549#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
499#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11" 550#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
500#endif 551#endif /* CONFIG_X86_32 */
501 552
502#ifdef CONFIG_PARAVIRT_DEBUG 553#ifdef CONFIG_PARAVIRT_DEBUG
503#define PVOP_TEST_NULL(op) BUG_ON(op == NULL) 554#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
@@ -505,10 +556,11 @@ int paravirt_disable_iospace(void);
505#define PVOP_TEST_NULL(op) ((void)op) 556#define PVOP_TEST_NULL(op) ((void)op)
506#endif 557#endif
507 558
508#define __PVOP_CALL(rettype, op, pre, post, ...) \ 559#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
560 pre, post, ...) \
509 ({ \ 561 ({ \
510 rettype __ret; \ 562 rettype __ret; \
511 PVOP_CALL_ARGS; \ 563 PVOP_CALL_ARGS; \
512 PVOP_TEST_NULL(op); \ 564 PVOP_TEST_NULL(op); \
513 /* This is 32-bit specific, but is okay in 64-bit */ \ 565 /* This is 32-bit specific, but is okay in 64-bit */ \
514 /* since this condition will never hold */ \ 566 /* since this condition will never hold */ \
@@ -516,70 +568,113 @@ int paravirt_disable_iospace(void);
516 asm volatile(pre \ 568 asm volatile(pre \
517 paravirt_alt(PARAVIRT_CALL) \ 569 paravirt_alt(PARAVIRT_CALL) \
518 post \ 570 post \
519 : PVOP_CALL_CLOBBERS \ 571 : call_clbr \
520 : paravirt_type(op), \ 572 : paravirt_type(op), \
521 paravirt_clobber(CLBR_ANY), \ 573 paravirt_clobber(clbr), \
522 ##__VA_ARGS__ \ 574 ##__VA_ARGS__ \
523 : "memory", "cc" EXTRA_CLOBBERS); \ 575 : "memory", "cc" extra_clbr); \
524 __ret = (rettype)((((u64)__edx) << 32) | __eax); \ 576 __ret = (rettype)((((u64)__edx) << 32) | __eax); \
525 } else { \ 577 } else { \
526 asm volatile(pre \ 578 asm volatile(pre \
527 paravirt_alt(PARAVIRT_CALL) \ 579 paravirt_alt(PARAVIRT_CALL) \
528 post \ 580 post \
529 : PVOP_CALL_CLOBBERS \ 581 : call_clbr \
530 : paravirt_type(op), \ 582 : paravirt_type(op), \
531 paravirt_clobber(CLBR_ANY), \ 583 paravirt_clobber(clbr), \
532 ##__VA_ARGS__ \ 584 ##__VA_ARGS__ \
533 : "memory", "cc" EXTRA_CLOBBERS); \ 585 : "memory", "cc" extra_clbr); \
534 __ret = (rettype)__eax; \ 586 __ret = (rettype)__eax; \
535 } \ 587 } \
536 __ret; \ 588 __ret; \
537 }) 589 })
538#define __PVOP_VCALL(op, pre, post, ...) \ 590
591#define __PVOP_CALL(rettype, op, pre, post, ...) \
592 ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
593 EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
594
595#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
596 ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
597 PVOP_CALLEE_CLOBBERS, , \
598 pre, post, ##__VA_ARGS__)
599
600
601#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
539 ({ \ 602 ({ \
540 PVOP_VCALL_ARGS; \ 603 PVOP_VCALL_ARGS; \
541 PVOP_TEST_NULL(op); \ 604 PVOP_TEST_NULL(op); \
542 asm volatile(pre \ 605 asm volatile(pre \
543 paravirt_alt(PARAVIRT_CALL) \ 606 paravirt_alt(PARAVIRT_CALL) \
544 post \ 607 post \
545 : PVOP_VCALL_CLOBBERS \ 608 : call_clbr \
546 : paravirt_type(op), \ 609 : paravirt_type(op), \
547 paravirt_clobber(CLBR_ANY), \ 610 paravirt_clobber(clbr), \
548 ##__VA_ARGS__ \ 611 ##__VA_ARGS__ \
549 : "memory", "cc" VEXTRA_CLOBBERS); \ 612 : "memory", "cc" extra_clbr); \
550 }) 613 })
551 614
615#define __PVOP_VCALL(op, pre, post, ...) \
616 ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
617 VEXTRA_CLOBBERS, \
618 pre, post, ##__VA_ARGS__)
619
620#define __PVOP_VCALLEESAVE(op, pre, post, ...) \
621 ____PVOP_VCALL(op.func, CLBR_RET_REG, \
622 PVOP_VCALLEE_CLOBBERS, , \
623 pre, post, ##__VA_ARGS__)
624
625
626
552#define PVOP_CALL0(rettype, op) \ 627#define PVOP_CALL0(rettype, op) \
553 __PVOP_CALL(rettype, op, "", "") 628 __PVOP_CALL(rettype, op, "", "")
554#define PVOP_VCALL0(op) \ 629#define PVOP_VCALL0(op) \
555 __PVOP_VCALL(op, "", "") 630 __PVOP_VCALL(op, "", "")
556 631
632#define PVOP_CALLEE0(rettype, op) \
633 __PVOP_CALLEESAVE(rettype, op, "", "")
634#define PVOP_VCALLEE0(op) \
635 __PVOP_VCALLEESAVE(op, "", "")
636
637
557#define PVOP_CALL1(rettype, op, arg1) \ 638#define PVOP_CALL1(rettype, op, arg1) \
558 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1))) 639 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
559#define PVOP_VCALL1(op, arg1) \ 640#define PVOP_VCALL1(op, arg1) \
560 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1))) 641 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
642
643#define PVOP_CALLEE1(rettype, op, arg1) \
644 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
645#define PVOP_VCALLEE1(op, arg1) \
646 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
647
561 648
562#define PVOP_CALL2(rettype, op, arg1, arg2) \ 649#define PVOP_CALL2(rettype, op, arg1, arg2) \
563 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ 650 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
564 "1" ((unsigned long)(arg2))) 651 PVOP_CALL_ARG2(arg2))
565#define PVOP_VCALL2(op, arg1, arg2) \ 652#define PVOP_VCALL2(op, arg1, arg2) \
566 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ 653 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
567 "1" ((unsigned long)(arg2))) 654 PVOP_CALL_ARG2(arg2))
655
656#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
657 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
658 PVOP_CALL_ARG2(arg2))
659#define PVOP_VCALLEE2(op, arg1, arg2) \
660 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
661 PVOP_CALL_ARG2(arg2))
662
568 663
569#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ 664#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
570 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ 665 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
571 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) 666 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
572#define PVOP_VCALL3(op, arg1, arg2, arg3) \ 667#define PVOP_VCALL3(op, arg1, arg2, arg3) \
573 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ 668 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
574 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) 669 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
575 670
576/* This is the only difference in x86_64. We can make it much simpler */ 671/* This is the only difference in x86_64. We can make it much simpler */
577#ifdef CONFIG_X86_32 672#ifdef CONFIG_X86_32
578#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ 673#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
579 __PVOP_CALL(rettype, op, \ 674 __PVOP_CALL(rettype, op, \
580 "push %[_arg4];", "lea 4(%%esp),%%esp;", \ 675 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
581 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ 676 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
582 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) 677 PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
583#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ 678#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
584 __PVOP_VCALL(op, \ 679 __PVOP_VCALL(op, \
585 "push %[_arg4];", "lea 4(%%esp),%%esp;", \ 680 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
@@ -587,13 +682,13 @@ int paravirt_disable_iospace(void);
587 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) 682 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
588#else 683#else
589#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ 684#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
590 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ 685 __PVOP_CALL(rettype, op, "", "", \
591 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ 686 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
592 "3"((unsigned long)(arg4))) 687 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
593#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ 688#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
594 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ 689 __PVOP_VCALL(op, "", "", \
595 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ 690 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
596 "3"((unsigned long)(arg4))) 691 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
597#endif 692#endif
598 693
599static inline int paravirt_enabled(void) 694static inline int paravirt_enabled(void)
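
The PVOP_CALLEE* variants above differ from PVOP_CALL* only in the clobber list fed to GCC, which is what frees the compiler to keep values live in argument and scratch registers across the call. A compile-only illustration of the two shapes (x86-64 GCC inline asm; hypothetical_target and hypothetical_thunk are placeholder symbols, not real kernel functions):

/* full clobber list: GCC must assume every caller-save reg is destroyed */
static inline unsigned long call_full_clobbers(unsigned long x)
{
	unsigned long ret;
	asm volatile("call hypothetical_target"
		     : "=a" (ret), "+D" (x)	/* rdi consumed as arg 1 */
		     :
		     : "rsi", "rdx", "rcx", "r8", "r9", "r10", "r11",
		       "memory", "cc");
	return ret;
}

/* callee-save form: only the return register is reported as clobbered */
static inline unsigned long call_callee_save(unsigned long x)
{
	unsigned long ret;
	asm volatile("call hypothetical_thunk"
		     : "=a" (ret)		/* thunk preserves the rest */
		     : "D" (x)
		     : "memory", "cc");
	return ret;
}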
@@ -984,10 +1079,11 @@ static inline void __flush_tlb_single(unsigned long addr)
984 PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr); 1079 PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
985} 1080}
986 1081
987static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, 1082static inline void flush_tlb_others(const struct cpumask *cpumask,
1083 struct mm_struct *mm,
988 unsigned long va) 1084 unsigned long va)
989{ 1085{
990 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va); 1086 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
991} 1087}
992 1088
993static inline int paravirt_pgd_alloc(struct mm_struct *mm) 1089static inline int paravirt_pgd_alloc(struct mm_struct *mm)
@@ -1059,13 +1155,13 @@ static inline pte_t __pte(pteval_t val)
1059 pteval_t ret; 1155 pteval_t ret;
1060 1156
1061 if (sizeof(pteval_t) > sizeof(long)) 1157 if (sizeof(pteval_t) > sizeof(long))
1062 ret = PVOP_CALL2(pteval_t, 1158 ret = PVOP_CALLEE2(pteval_t,
1063 pv_mmu_ops.make_pte, 1159 pv_mmu_ops.make_pte,
1064 val, (u64)val >> 32); 1160 val, (u64)val >> 32);
1065 else 1161 else
1066 ret = PVOP_CALL1(pteval_t, 1162 ret = PVOP_CALLEE1(pteval_t,
1067 pv_mmu_ops.make_pte, 1163 pv_mmu_ops.make_pte,
1068 val); 1164 val);
1069 1165
1070 return (pte_t) { .pte = ret }; 1166 return (pte_t) { .pte = ret };
1071} 1167}
@@ -1075,29 +1171,12 @@ static inline pteval_t pte_val(pte_t pte)
1075 pteval_t ret; 1171 pteval_t ret;
1076 1172
1077 if (sizeof(pteval_t) > sizeof(long)) 1173 if (sizeof(pteval_t) > sizeof(long))
1078 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val, 1174 ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
1079 pte.pte, (u64)pte.pte >> 32); 1175 pte.pte, (u64)pte.pte >> 32);
1080 else
1081 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
1082 pte.pte);
1083
1084 return ret;
1085}
1086
1087static inline pteval_t pte_flags(pte_t pte)
1088{
1089 pteval_t ret;
1090
1091 if (sizeof(pteval_t) > sizeof(long))
1092 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
1093 pte.pte, (u64)pte.pte >> 32);
1094 else 1176 else
1095 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags, 1177 ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
1096 pte.pte); 1178 pte.pte);
1097 1179
1098#ifdef CONFIG_PARAVIRT_DEBUG
1099 BUG_ON(ret & PTE_PFN_MASK);
1100#endif
1101 return ret; 1180 return ret;
1102} 1181}
1103 1182
@@ -1106,11 +1185,11 @@ static inline pgd_t __pgd(pgdval_t val)
1106 pgdval_t ret; 1185 pgdval_t ret;
1107 1186
1108 if (sizeof(pgdval_t) > sizeof(long)) 1187 if (sizeof(pgdval_t) > sizeof(long))
1109 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd, 1188 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
1110 val, (u64)val >> 32); 1189 val, (u64)val >> 32);
1111 else 1190 else
1112 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd, 1191 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
1113 val); 1192 val);
1114 1193
1115 return (pgd_t) { ret }; 1194 return (pgd_t) { ret };
1116} 1195}
@@ -1120,11 +1199,11 @@ static inline pgdval_t pgd_val(pgd_t pgd)
1120 pgdval_t ret; 1199 pgdval_t ret;
1121 1200
1122 if (sizeof(pgdval_t) > sizeof(long)) 1201 if (sizeof(pgdval_t) > sizeof(long))
1123 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val, 1202 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
1124 pgd.pgd, (u64)pgd.pgd >> 32); 1203 pgd.pgd, (u64)pgd.pgd >> 32);
1125 else 1204 else
1126 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val, 1205 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
1127 pgd.pgd); 1206 pgd.pgd);
1128 1207
1129 return ret; 1208 return ret;
1130} 1209}
@@ -1188,11 +1267,11 @@ static inline pmd_t __pmd(pmdval_t val)
1188 pmdval_t ret; 1267 pmdval_t ret;
1189 1268
1190 if (sizeof(pmdval_t) > sizeof(long)) 1269 if (sizeof(pmdval_t) > sizeof(long))
1191 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd, 1270 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
1192 val, (u64)val >> 32); 1271 val, (u64)val >> 32);
1193 else 1272 else
1194 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd, 1273 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
1195 val); 1274 val);
1196 1275
1197 return (pmd_t) { ret }; 1276 return (pmd_t) { ret };
1198} 1277}
@@ -1202,11 +1281,11 @@ static inline pmdval_t pmd_val(pmd_t pmd)
1202 pmdval_t ret; 1281 pmdval_t ret;
1203 1282
1204 if (sizeof(pmdval_t) > sizeof(long)) 1283 if (sizeof(pmdval_t) > sizeof(long))
1205 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val, 1284 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
1206 pmd.pmd, (u64)pmd.pmd >> 32); 1285 pmd.pmd, (u64)pmd.pmd >> 32);
1207 else 1286 else
1208 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val, 1287 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
1209 pmd.pmd); 1288 pmd.pmd);
1210 1289
1211 return ret; 1290 return ret;
1212} 1291}
@@ -1228,11 +1307,11 @@ static inline pud_t __pud(pudval_t val)
1228 pudval_t ret; 1307 pudval_t ret;
1229 1308
1230 if (sizeof(pudval_t) > sizeof(long)) 1309 if (sizeof(pudval_t) > sizeof(long))
1231 ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud, 1310 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
1232 val, (u64)val >> 32); 1311 val, (u64)val >> 32);
1233 else 1312 else
1234 ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud, 1313 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
1235 val); 1314 val);
1236 1315
1237 return (pud_t) { ret }; 1316 return (pud_t) { ret };
1238} 1317}
@@ -1242,11 +1321,11 @@ static inline pudval_t pud_val(pud_t pud)
1242 pudval_t ret; 1321 pudval_t ret;
1243 1322
1244 if (sizeof(pudval_t) > sizeof(long)) 1323 if (sizeof(pudval_t) > sizeof(long))
1245 ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val, 1324 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
1246 pud.pud, (u64)pud.pud >> 32); 1325 pud.pud, (u64)pud.pud >> 32);
1247 else 1326 else
1248 ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val, 1327 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
1249 pud.pud); 1328 pud.pud);
1250 1329
1251 return ret; 1330 return ret;
1252} 1331}
@@ -1387,9 +1466,10 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1387} 1466}
1388 1467
1389void _paravirt_nop(void); 1468void _paravirt_nop(void);
1390#define paravirt_nop ((void *)_paravirt_nop) 1469u32 _paravirt_ident_32(u32);
1470u64 _paravirt_ident_64(u64);
1391 1471
1392void paravirt_use_bytelocks(void); 1472#define paravirt_nop ((void *)_paravirt_nop)
1393 1473
1394#ifdef CONFIG_SMP 1474#ifdef CONFIG_SMP
1395 1475
@@ -1439,12 +1519,37 @@ extern struct paravirt_patch_site __parainstructions[],
1439 __parainstructions_end[]; 1519 __parainstructions_end[];
1440 1520
1441#ifdef CONFIG_X86_32 1521#ifdef CONFIG_X86_32
1442#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;" 1522#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
1443#define PV_RESTORE_REGS "popl %%edx; popl %%ecx" 1523#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
1524
1525/* save and restore all caller-save registers, except return value */
1526#define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;"
1527#define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;"
1528
1444#define PV_FLAGS_ARG "0" 1529#define PV_FLAGS_ARG "0"
1445#define PV_EXTRA_CLOBBERS 1530#define PV_EXTRA_CLOBBERS
1446#define PV_VEXTRA_CLOBBERS 1531#define PV_VEXTRA_CLOBBERS
1447#else 1532#else
1533/* save and restore all caller-save registers, except return value */
1534#define PV_SAVE_ALL_CALLER_REGS \
1535 "push %rcx;" \
1536 "push %rdx;" \
1537 "push %rsi;" \
1538 "push %rdi;" \
1539 "push %r8;" \
1540 "push %r9;" \
1541 "push %r10;" \
1542 "push %r11;"
1543#define PV_RESTORE_ALL_CALLER_REGS \
1544 "pop %r11;" \
1545 "pop %r10;" \
1546 "pop %r9;" \
1547 "pop %r8;" \
1548 "pop %rdi;" \
1549 "pop %rsi;" \
1550 "pop %rdx;" \
1551 "pop %rcx;"
1552
1448/* We save some registers; saving all of them would be too much. We clobber 1553
1449 * all caller-saved registers except the argument parameter */ 1554
1450#define PV_SAVE_REGS "pushq %%rdi;" 1555#define PV_SAVE_REGS "pushq %%rdi;"
@@ -1454,52 +1559,76 @@ extern struct paravirt_patch_site __parainstructions[],
1454#define PV_FLAGS_ARG "D" 1559#define PV_FLAGS_ARG "D"
1455#endif 1560#endif
1456 1561
1562/*
1563 * Generate a thunk around a function which saves all caller-save
1564 * registers except for the return value. This allows C functions to
1565 * be called from assembler code where fewer than normal registers are
1566 * available. It may also help code generation around calls from C
1567 * code if the common case doesn't use many registers.
1568 *
1569 * When a callee is wrapped in a thunk, the caller can assume that all
1570 * arg regs and all scratch registers are preserved across the
1571 * call. The return value in rax/eax will not be saved, even for void
1572 * functions.
1573 */
1574#define PV_CALLEE_SAVE_REGS_THUNK(func) \
1575 extern typeof(func) __raw_callee_save_##func; \
1576 static void *__##func##__ __used = func; \
1577 \
1578 asm(".pushsection .text;" \
1579 "__raw_callee_save_" #func ": " \
1580 PV_SAVE_ALL_CALLER_REGS \
1581 "call " #func ";" \
1582 PV_RESTORE_ALL_CALLER_REGS \
1583 "ret;" \
1584 ".popsection")
1585
1586/* Get a reference to a callee-save function */
1587#define PV_CALLEE_SAVE(func) \
1588 ((struct paravirt_callee_save) { __raw_callee_save_##func })
1589
1590/* Promise that "func" already uses the right calling convention */
1591#define __PV_IS_CALLEE_SAVE(func) \
1592 ((struct paravirt_callee_save) { func })
1593
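
To make the new convention concrete, here is a minimal usage sketch, not part of this patch: my_save_fl() is a hypothetical backend implementation, and the last line assumes pv_irq_ops.save_fl has been converted to a struct paravirt_callee_save, as done elsewhere in this series.

	/* plain C implementation of the op (hypothetical) */
	static unsigned long my_save_fl(void)
	{
		return native_save_fl();
	}
	/* emits __raw_callee_save_my_save_fl, which preserves all
	 * caller-save registers around the call */
	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);

	/* install the wrapped version in backend setup code */
	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);

A native function that already clobbers nothing extra would instead be installed with __PV_IS_CALLEE_SAVE(native_save_fl) and skip the thunk entirely.
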
1457static inline unsigned long __raw_local_save_flags(void) 1594static inline unsigned long __raw_local_save_flags(void)
1458{ 1595{
1459 unsigned long f; 1596 unsigned long f;
1460 1597
1461 asm volatile(paravirt_alt(PV_SAVE_REGS 1598 asm volatile(paravirt_alt(PARAVIRT_CALL)
1462 PARAVIRT_CALL
1463 PV_RESTORE_REGS)
1464 : "=a"(f) 1599 : "=a"(f)
1465 : paravirt_type(pv_irq_ops.save_fl), 1600 : paravirt_type(pv_irq_ops.save_fl),
1466 paravirt_clobber(CLBR_EAX) 1601 paravirt_clobber(CLBR_EAX)
1467 : "memory", "cc" PV_VEXTRA_CLOBBERS); 1602 : "memory", "cc");
1468 return f; 1603 return f;
1469} 1604}
1470 1605
1471static inline void raw_local_irq_restore(unsigned long f) 1606static inline void raw_local_irq_restore(unsigned long f)
1472{ 1607{
1473 asm volatile(paravirt_alt(PV_SAVE_REGS 1608 asm volatile(paravirt_alt(PARAVIRT_CALL)
1474 PARAVIRT_CALL
1475 PV_RESTORE_REGS)
1476 : "=a"(f) 1609 : "=a"(f)
1477 : PV_FLAGS_ARG(f), 1610 : PV_FLAGS_ARG(f),
1478 paravirt_type(pv_irq_ops.restore_fl), 1611 paravirt_type(pv_irq_ops.restore_fl),
1479 paravirt_clobber(CLBR_EAX) 1612 paravirt_clobber(CLBR_EAX)
1480 : "memory", "cc" PV_EXTRA_CLOBBERS); 1613 : "memory", "cc");
1481} 1614}
1482 1615
1483static inline void raw_local_irq_disable(void) 1616static inline void raw_local_irq_disable(void)
1484{ 1617{
1485 asm volatile(paravirt_alt(PV_SAVE_REGS 1618 asm volatile(paravirt_alt(PARAVIRT_CALL)
1486 PARAVIRT_CALL
1487 PV_RESTORE_REGS)
1488 : 1619 :
1489 : paravirt_type(pv_irq_ops.irq_disable), 1620 : paravirt_type(pv_irq_ops.irq_disable),
1490 paravirt_clobber(CLBR_EAX) 1621 paravirt_clobber(CLBR_EAX)
1491 : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); 1622 : "memory", "eax", "cc");
1492} 1623}
1493 1624
1494static inline void raw_local_irq_enable(void) 1625static inline void raw_local_irq_enable(void)
1495{ 1626{
1496 asm volatile(paravirt_alt(PV_SAVE_REGS 1627 asm volatile(paravirt_alt(PARAVIRT_CALL)
1497 PARAVIRT_CALL
1498 PV_RESTORE_REGS)
1499 : 1628 :
1500 : paravirt_type(pv_irq_ops.irq_enable), 1629 : paravirt_type(pv_irq_ops.irq_enable),
1501 paravirt_clobber(CLBR_EAX) 1630 paravirt_clobber(CLBR_EAX)
1502 : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); 1631 : "memory", "eax", "cc");
1503} 1632}
1504 1633
1505static inline unsigned long __raw_local_irq_save(void) 1634static inline unsigned long __raw_local_irq_save(void)
@@ -1542,33 +1671,49 @@ static inline unsigned long __raw_local_irq_save(void)
1542 .popsection 1671 .popsection
1543 1672
1544 1673
1674#define COND_PUSH(set, mask, reg) \
1675 .if ((~(set)) & mask); push %reg; .endif
1676#define COND_POP(set, mask, reg) \
1677 .if ((~(set)) & mask); pop %reg; .endif
1678
1545#ifdef CONFIG_X86_64 1679#ifdef CONFIG_X86_64
1546#define PV_SAVE_REGS \ 1680
1547 push %rax; \ 1681#define PV_SAVE_REGS(set) \
1548 push %rcx; \ 1682 COND_PUSH(set, CLBR_RAX, rax); \
1549 push %rdx; \ 1683 COND_PUSH(set, CLBR_RCX, rcx); \
1550 push %rsi; \ 1684 COND_PUSH(set, CLBR_RDX, rdx); \
1551 push %rdi; \ 1685 COND_PUSH(set, CLBR_RSI, rsi); \
1552 push %r8; \ 1686 COND_PUSH(set, CLBR_RDI, rdi); \
1553 push %r9; \ 1687 COND_PUSH(set, CLBR_R8, r8); \
1554 push %r10; \ 1688 COND_PUSH(set, CLBR_R9, r9); \
1555 push %r11 1689 COND_PUSH(set, CLBR_R10, r10); \
1556#define PV_RESTORE_REGS \ 1690 COND_PUSH(set, CLBR_R11, r11)
1557 pop %r11; \ 1691#define PV_RESTORE_REGS(set) \
1558 pop %r10; \ 1692 COND_POP(set, CLBR_R11, r11); \
1559 pop %r9; \ 1693 COND_POP(set, CLBR_R10, r10); \
1560 pop %r8; \ 1694 COND_POP(set, CLBR_R9, r9); \
1561 pop %rdi; \ 1695 COND_POP(set, CLBR_R8, r8); \
1562 pop %rsi; \ 1696 COND_POP(set, CLBR_RDI, rdi); \
1563 pop %rdx; \ 1697 COND_POP(set, CLBR_RSI, rsi); \
1564 pop %rcx; \ 1698 COND_POP(set, CLBR_RDX, rdx); \
1565 pop %rax 1699 COND_POP(set, CLBR_RCX, rcx); \
1700 COND_POP(set, CLBR_RAX, rax)
1701
1566#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8) 1702#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
1567#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8) 1703#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
1568#define PARA_INDIRECT(addr) *addr(%rip) 1704#define PARA_INDIRECT(addr) *addr(%rip)
1569#else 1705#else
1570#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx 1706#define PV_SAVE_REGS(set) \
1571#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax 1707 COND_PUSH(set, CLBR_EAX, eax); \
1708 COND_PUSH(set, CLBR_EDI, edi); \
1709 COND_PUSH(set, CLBR_ECX, ecx); \
1710 COND_PUSH(set, CLBR_EDX, edx)
1711#define PV_RESTORE_REGS(set) \
1712 COND_POP(set, CLBR_EDX, edx); \
1713 COND_POP(set, CLBR_ECX, ecx); \
1714 COND_POP(set, CLBR_EDI, edi); \
1715 COND_POP(set, CLBR_EAX, eax)
1716
1572#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) 1717#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
1573#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) 1718#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
1574#define PARA_INDIRECT(addr) *%cs:addr 1719#define PARA_INDIRECT(addr) *%cs:addr
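
Note that COND_PUSH/COND_POP test the *complement* of their argument: a register is pushed only when its CLBR_ bit is not in "set", so call sites pass the set of registers the callee is permitted to clobber and everything else gets preserved. A worked expansion, assuming the one-bit-per-register CLBR_* encoding defined earlier in this file:

	PV_SAVE_REGS(CLBR_EAX)		/* 32-bit; %eax may be clobbered */
		=> push %edi; push %ecx; push %edx
	PV_RESTORE_REGS(CLBR_EAX)
		=> pop %edx; pop %ecx; pop %edi
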
@@ -1580,15 +1725,15 @@ static inline unsigned long __raw_local_irq_save(void)
1580 1725
1581#define DISABLE_INTERRUPTS(clobbers) \ 1726#define DISABLE_INTERRUPTS(clobbers) \
1582 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ 1727 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
1583 PV_SAVE_REGS; \ 1728 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
1584 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ 1729 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
1585 PV_RESTORE_REGS;) \ 1730 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1586 1731
1587#define ENABLE_INTERRUPTS(clobbers) \ 1732#define ENABLE_INTERRUPTS(clobbers) \
1588 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ 1733 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
1589 PV_SAVE_REGS; \ 1734 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
1590 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ 1735 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
1591 PV_RESTORE_REGS;) 1736 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1592 1737
1593#define USERGS_SYSRET32 \ 1738#define USERGS_SYSRET32 \
1594 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \ 1739 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
@@ -1618,11 +1763,15 @@ static inline unsigned long __raw_local_irq_save(void)
1618 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ 1763 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1619 swapgs) 1764 swapgs)
1620 1765
1766/*
1767 * Note: swapgs is very special, and in practice is either going to be
1768 * implemented with a single "swapgs" instruction or something very
1769 * special. Either way, we don't need to save any registers for
1770 * it.
1771 */
1621#define SWAPGS \ 1772#define SWAPGS \
1622 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ 1773 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1623 PV_SAVE_REGS; \ 1774 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
1624 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
1625 PV_RESTORE_REGS \
1626 ) 1775 )
1627 1776
1628#define GET_CR2_INTO_RCX \ 1777#define GET_CR2_INTO_RCX \
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index b8493b3b9890..9709fdff6615 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -5,10 +5,8 @@
5 5
6#ifdef CONFIG_X86_PAT 6#ifdef CONFIG_X86_PAT
7extern int pat_enabled; 7extern int pat_enabled;
8extern void validate_pat_support(struct cpuinfo_x86 *c);
9#else 8#else
10static const int pat_enabled; 9static const int pat_enabled;
11static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
12#endif 10#endif
13 11
14extern void pat_init(void); 12extern void pat_init(void);
@@ -17,6 +15,4 @@ extern int reserve_memtype(u64 start, u64 end,
17 unsigned long req_type, unsigned long *ret_type); 15 unsigned long req_type, unsigned long *ret_type);
18extern int free_memtype(u64 start, u64 end); 16extern int free_memtype(u64 start, u64 end);
19 17
20extern void pat_disable(char *reason);
21
22#endif /* _ASM_X86_PAT_H */ 18#endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/mach-default/pci-functions.h b/arch/x86/include/asm/pci-functions.h
index ed0bab427354..ed0bab427354 100644
--- a/arch/x86/include/asm/mach-default/pci-functions.h
+++ b/arch/x86/include/asm/pci-functions.h
diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h
deleted file mode 100644
index 2fbfff88df37..000000000000
--- a/arch/x86/include/asm/pda.h
+++ /dev/null
@@ -1,137 +0,0 @@
1#ifndef _ASM_X86_PDA_H
2#define _ASM_X86_PDA_H
3
4#ifndef __ASSEMBLY__
5#include <linux/stddef.h>
6#include <linux/types.h>
7#include <linux/cache.h>
8#include <asm/page.h>
9
10/* Per processor datastructure. %gs points to it while the kernel runs */
11struct x8664_pda {
12 struct task_struct *pcurrent; /* 0 Current process */
13 unsigned long data_offset; /* 8 Per cpu data offset from linker
14 address */
15 unsigned long kernelstack; /* 16 top of kernel stack for current */
16 unsigned long oldrsp; /* 24 user rsp for system call */
17 int irqcount; /* 32 Irq nesting counter. Starts -1 */
18 unsigned int cpunumber; /* 36 Logical CPU number */
19#ifdef CONFIG_CC_STACKPROTECTOR
20 unsigned long stack_canary; /* 40 stack canary value */
21 /* gcc-ABI: this canary MUST be at
22 offset 40!!! */
23#endif
24 char *irqstackptr;
25 short nodenumber; /* number of current node (32k max) */
26 short in_bootmem; /* pda lives in bootmem */
27 unsigned int __softirq_pending;
28 unsigned int __nmi_count; /* number of NMI on this CPUs */
29 short mmu_state;
30 short isidle;
31 struct mm_struct *active_mm;
32 unsigned apic_timer_irqs;
33 unsigned irq0_irqs;
34 unsigned irq_resched_count;
35 unsigned irq_call_count;
36 unsigned irq_tlb_count;
37 unsigned irq_thermal_count;
38 unsigned irq_threshold_count;
39 unsigned irq_spurious_count;
40} ____cacheline_aligned_in_smp;
41
42extern struct x8664_pda **_cpu_pda;
43extern void pda_init(int);
44
45#define cpu_pda(i) (_cpu_pda[i])
46
47/*
48 * There is no fast way to get the base address of the PDA, all the accesses
49 * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
50 */
51extern void __bad_pda_field(void) __attribute__((noreturn));
52
53/*
54 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
55 * all PDA accesses so it gets read/write dependencies right.
56 */
57extern struct x8664_pda _proxy_pda;
58
59#define pda_offset(field) offsetof(struct x8664_pda, field)
60
61#define pda_to_op(op, field, val) \
62do { \
63 typedef typeof(_proxy_pda.field) T__; \
64 if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
65 switch (sizeof(_proxy_pda.field)) { \
66 case 2: \
67 asm(op "w %1,%%gs:%c2" : \
68 "+m" (_proxy_pda.field) : \
69 "ri" ((T__)val), \
70 "i"(pda_offset(field))); \
71 break; \
72 case 4: \
73 asm(op "l %1,%%gs:%c2" : \
74 "+m" (_proxy_pda.field) : \
75 "ri" ((T__)val), \
76 "i" (pda_offset(field))); \
77 break; \
78 case 8: \
79 asm(op "q %1,%%gs:%c2": \
80 "+m" (_proxy_pda.field) : \
81 "ri" ((T__)val), \
82 "i"(pda_offset(field))); \
83 break; \
84 default: \
85 __bad_pda_field(); \
86 } \
87} while (0)
88
89#define pda_from_op(op, field) \
90({ \
91 typeof(_proxy_pda.field) ret__; \
92 switch (sizeof(_proxy_pda.field)) { \
93 case 2: \
94 asm(op "w %%gs:%c1,%0" : \
95 "=r" (ret__) : \
96 "i" (pda_offset(field)), \
97 "m" (_proxy_pda.field)); \
98 break; \
99 case 4: \
100 asm(op "l %%gs:%c1,%0": \
101 "=r" (ret__): \
102 "i" (pda_offset(field)), \
103 "m" (_proxy_pda.field)); \
104 break; \
105 case 8: \
106 asm(op "q %%gs:%c1,%0": \
107 "=r" (ret__) : \
108 "i" (pda_offset(field)), \
109 "m" (_proxy_pda.field)); \
110 break; \
111 default: \
112 __bad_pda_field(); \
113 } \
114 ret__; \
115})
116
117#define read_pda(field) pda_from_op("mov", field)
118#define write_pda(field, val) pda_to_op("mov", field, val)
119#define add_pda(field, val) pda_to_op("add", field, val)
120#define sub_pda(field, val) pda_to_op("sub", field, val)
121#define or_pda(field, val) pda_to_op("or", field, val)
122
123/* This is not atomic against other CPUs -- CPU preemption needs to be off */
124#define test_and_clear_bit_pda(bit, field) \
125({ \
126 int old__; \
127 asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
128 : "=r" (old__), "+m" (_proxy_pda.field) \
129 : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
130 old__; \
131})
132
133#endif
134
135#define PDA_STACKOFFSET (5*8)
136
137#endif /* _ASM_X86_PDA_H */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index ece72053ba63..aee103b26d01 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -2,53 +2,12 @@
2#define _ASM_X86_PERCPU_H 2#define _ASM_X86_PERCPU_H
3 3
4#ifdef CONFIG_X86_64 4#ifdef CONFIG_X86_64
5#include <linux/compiler.h> 5#define __percpu_seg gs
6 6#define __percpu_mov_op movq
7/* Same as asm-generic/percpu.h, except that we store the per cpu offset 7#else
8 in the PDA. Longer term the PDA and every per cpu variable 8#define __percpu_seg fs
9 should be just put into a single section and referenced directly 9#define __percpu_mov_op movl
10 from %gs */
11
12#ifdef CONFIG_SMP
13#include <asm/pda.h>
14
15#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
16#define __my_cpu_offset read_pda(data_offset)
17
18#define per_cpu_offset(x) (__per_cpu_offset(x))
19
20#endif 10#endif
21#include <asm-generic/percpu.h>
22
23DECLARE_PER_CPU(struct x8664_pda, pda);
24
25/*
26 * These are supposed to be implemented as a single instruction which
27 * operates on the per-cpu data base segment. x86-64 doesn't have
28 * that yet, so this is a fairly inefficient workaround for the
29 * meantime. The single instruction is atomic with respect to
30 * preemption and interrupts, so we need to explicitly disable
31 * interrupts here to achieve the same effect. However, because it
32 * can be used from within interrupt-disable/enable, we can't actually
33 * disable interrupts; disabling preemption is enough.
34 */
35#define x86_read_percpu(var) \
36 ({ \
37 typeof(per_cpu_var(var)) __tmp; \
38 preempt_disable(); \
39 __tmp = __get_cpu_var(var); \
40 preempt_enable(); \
41 __tmp; \
42 })
43
44#define x86_write_percpu(var, val) \
45 do { \
46 preempt_disable(); \
47 __get_cpu_var(var) = (val); \
48 preempt_enable(); \
49 } while(0)
50
51#else /* CONFIG_X86_64 */
52 11
53#ifdef __ASSEMBLY__ 12#ifdef __ASSEMBLY__
54 13
@@ -65,47 +24,48 @@ DECLARE_PER_CPU(struct x8664_pda, pda);
65 * PER_CPU(cpu_gdt_descr, %ebx) 24 * PER_CPU(cpu_gdt_descr, %ebx)
66 */ 25 */
67#ifdef CONFIG_SMP 26#ifdef CONFIG_SMP
68#define PER_CPU(var, reg) \ 27#define PER_CPU(var, reg) \
69 movl %fs:per_cpu__##this_cpu_off, reg; \ 28 __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \
70 lea per_cpu__##var(reg), reg 29 lea per_cpu__##var(reg), reg
71#define PER_CPU_VAR(var) %fs:per_cpu__##var 30#define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var
72#else /* ! SMP */ 31#else /* ! SMP */
73#define PER_CPU(var, reg) \ 32#define PER_CPU(var, reg) \
74 movl $per_cpu__##var, reg 33 __percpu_mov_op $per_cpu__##var, reg
75#define PER_CPU_VAR(var) per_cpu__##var 34#define PER_CPU_VAR(var) per_cpu__##var
76#endif /* SMP */ 35#endif /* SMP */
77 36
37#ifdef CONFIG_X86_64_SMP
38#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
39#else
40#define INIT_PER_CPU_VAR(var) per_cpu__##var
41#endif
42
78#else /* ...!ASSEMBLY */ 43#else /* ...!ASSEMBLY */
79 44
45#include <linux/stringify.h>
46
47#ifdef CONFIG_SMP
48#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
49#define __my_cpu_offset percpu_read(this_cpu_off)
50#else
51#define __percpu_arg(x) "%" #x
52#endif
53
80/* 54/*
81 * PER_CPU finds an address of a per-cpu variable. 55 * Initialized pointers to per-cpu variables needed by the boot
56 * processor must use these macros to get the proper address
57 * offset from __per_cpu_load on SMP.
82 * 58 *
83 * Args: 59 * There must also be an entry in vmlinux_64.lds.S
84 * var - variable name
85 * cpu - 32bit register containing the current CPU number
86 *
87 * The resulting address is stored in the "cpu" argument.
88 *
89 * Example:
90 * PER_CPU(cpu_gdt_descr, %ebx)
91 */ 60 */
92#ifdef CONFIG_SMP 61#define DECLARE_INIT_PER_CPU(var) \
93 62 extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
94#define __my_cpu_offset x86_read_percpu(this_cpu_off)
95
96/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
97#define __percpu_seg "%%fs:"
98
99#else /* !SMP */
100
101#define __percpu_seg ""
102
103#endif /* SMP */
104
105#include <asm-generic/percpu.h>
106 63
107/* We can use this directly for local CPU (faster). */ 64#ifdef CONFIG_X86_64_SMP
108DECLARE_PER_CPU(unsigned long, this_cpu_off); 65#define init_per_cpu_var(var) init_per_cpu__##var
66#else
67#define init_per_cpu_var(var) per_cpu_var(var)
68#endif
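
A usage sketch matching the processor.h change later in this diff: the declaration pairs a normal per-cpu declaration with its boot-time alias, and early code can then take the boot CPU's copy before the per-cpu areas are fully set up (the second snippet is illustrative only, not part of this patch):

	/* in a header */
	DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
	DECLARE_INIT_PER_CPU(irq_stack_union);

	/* hypothetical early boot code */
	unsigned long gs_base =
		(unsigned long)&init_per_cpu_var(irq_stack_union);
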
109 69
110/* For arch-specific code, we can use direct single-insn ops (they 70/* For arch-specific code, we can use direct single-insn ops (they
111 * don't give an lvalue though). */ 71 * don't give an lvalue though). */
@@ -120,20 +80,25 @@ do { \
120 } \ 80 } \
121 switch (sizeof(var)) { \ 81 switch (sizeof(var)) { \
122 case 1: \ 82 case 1: \
123 asm(op "b %1,"__percpu_seg"%0" \ 83 asm(op "b %1,"__percpu_arg(0) \
124 : "+m" (var) \ 84 : "+m" (var) \
125 : "ri" ((T__)val)); \ 85 : "ri" ((T__)val)); \
126 break; \ 86 break; \
127 case 2: \ 87 case 2: \
128 asm(op "w %1,"__percpu_seg"%0" \ 88 asm(op "w %1,"__percpu_arg(0) \
129 : "+m" (var) \ 89 : "+m" (var) \
130 : "ri" ((T__)val)); \ 90 : "ri" ((T__)val)); \
131 break; \ 91 break; \
132 case 4: \ 92 case 4: \
133 asm(op "l %1,"__percpu_seg"%0" \ 93 asm(op "l %1,"__percpu_arg(0) \
134 : "+m" (var) \ 94 : "+m" (var) \
135 : "ri" ((T__)val)); \ 95 : "ri" ((T__)val)); \
136 break; \ 96 break; \
97 case 8: \
98 asm(op "q %1,"__percpu_arg(0) \
99 : "+m" (var) \
100 : "re" ((T__)val)); \
101 break; \
137 default: __bad_percpu_size(); \ 102 default: __bad_percpu_size(); \
138 } \ 103 } \
139} while (0) 104} while (0)
@@ -143,17 +108,22 @@ do { \
143 typeof(var) ret__; \ 108 typeof(var) ret__; \
144 switch (sizeof(var)) { \ 109 switch (sizeof(var)) { \
145 case 1: \ 110 case 1: \
146 asm(op "b "__percpu_seg"%1,%0" \ 111 asm(op "b "__percpu_arg(1)",%0" \
147 : "=r" (ret__) \ 112 : "=r" (ret__) \
148 : "m" (var)); \ 113 : "m" (var)); \
149 break; \ 114 break; \
150 case 2: \ 115 case 2: \
151 asm(op "w "__percpu_seg"%1,%0" \ 116 asm(op "w "__percpu_arg(1)",%0" \
152 : "=r" (ret__) \ 117 : "=r" (ret__) \
153 : "m" (var)); \ 118 : "m" (var)); \
154 break; \ 119 break; \
155 case 4: \ 120 case 4: \
156 asm(op "l "__percpu_seg"%1,%0" \ 121 asm(op "l "__percpu_arg(1)",%0" \
122 : "=r" (ret__) \
123 : "m" (var)); \
124 break; \
125 case 8: \
126 asm(op "q "__percpu_arg(1)",%0" \
157 : "=r" (ret__) \ 127 : "=r" (ret__) \
158 : "m" (var)); \ 128 : "m" (var)); \
159 break; \ 129 break; \
@@ -162,13 +132,30 @@ do { \
162 ret__; \ 132 ret__; \
163}) 133})
164 134
165#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var) 135#define percpu_read(var) percpu_from_op("mov", per_cpu__##var)
166#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val) 136#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
167#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val) 137#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val)
168#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val) 138#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val)
169#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val) 139#define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val)
140#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
141#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
142
143/* This is not atomic against other CPUs -- CPU preemption needs to be off */
144#define x86_test_and_clear_bit_percpu(bit, var) \
145({ \
146 int old__; \
147 asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
148 : "=r" (old__), "+m" (per_cpu__##var) \
149 : "dIr" (bit)); \
150 old__; \
151})
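
These accessors compile down to a single segment-prefixed instruction; for example, the smp.h hunk below redefines raw_smp_processor_id() as percpu_read(cpu_number), which should emit roughly (exact code is compiler-dependent):

	movl %fs:per_cpu__cpu_number, %eax	/* 32-bit SMP */
	movl %gs:per_cpu__cpu_number, %eax	/* 64-bit */

Because the access is one instruction, it is atomic with respect to preemption and interrupts, which is why the preempt_disable()-based x86_read_percpu() wrapper removed above is no longer needed.
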
152
153#include <asm-generic/percpu.h>
154
155/* We can use this directly for local CPU (faster). */
156DECLARE_PER_CPU(unsigned long, this_cpu_off);
157
170#endif /* !__ASSEMBLY__ */ 158#endif /* !__ASSEMBLY__ */
171#endif /* !CONFIG_X86_64 */
172 159
173#ifdef CONFIG_SMP 160#ifdef CONFIG_SMP
174 161
@@ -195,9 +182,9 @@ do { \
195#define early_per_cpu_ptr(_name) (_name##_early_ptr) 182#define early_per_cpu_ptr(_name) (_name##_early_ptr)
196#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx]) 183#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
197#define early_per_cpu(_name, _cpu) \ 184#define early_per_cpu(_name, _cpu) \
198 (early_per_cpu_ptr(_name) ? \ 185 *(early_per_cpu_ptr(_name) ? \
199 early_per_cpu_ptr(_name)[_cpu] : \ 186 &early_per_cpu_ptr(_name)[_cpu] : \
200 per_cpu(_name, _cpu)) 187 &per_cpu(_name, _cpu))
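
Because the reworked macro dereferences a pointer chosen by the conditional, early_per_cpu() is now an lvalue and can sit on either side of an assignment; a hypothetical example, assuming the usual x86_cpu_to_apicid early per-cpu map:

	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;	/* write */
	id = early_per_cpu(x86_cpu_to_apicid, cpu);	/* read */
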
201 188
202#else /* !CONFIG_SMP */ 189#else /* !CONFIG_SMP */
203#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ 190#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index e0d199fe1d83..c1774ac9da7a 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -53,8 +53,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
53#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) 53#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
54#endif 54#endif
55 55
56#define pte_none(x) (!(x).pte_low)
57
58/* 56/*
59 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, 57 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
60 * split up the 29 bits of offset into this range: 58 * split up the 29 bits of offset into this range:
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 447da43cddb3..3f13cdf61156 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -18,21 +18,6 @@
18 printk("%s:%d: bad pgd %p(%016Lx).\n", \ 18 printk("%s:%d: bad pgd %p(%016Lx).\n", \
19 __FILE__, __LINE__, &(e), pgd_val(e)) 19 __FILE__, __LINE__, &(e), pgd_val(e))
20 20
21static inline int pud_none(pud_t pud)
22{
23 return pud_val(pud) == 0;
24}
25
26static inline int pud_bad(pud_t pud)
27{
28 return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
29}
30
31static inline int pud_present(pud_t pud)
32{
33 return pud_val(pud) & _PAGE_PRESENT;
34}
35
36/* Rules for using set_pte: the pte being assigned *must* be 21/* Rules for using set_pte: the pte being assigned *must* be
37 * either not present or in a state where the hardware will 22 * either not present or in a state where the hardware will
38 * not attempt to update the pte. In places where this is 23 * not attempt to update the pte. In places where this is
@@ -120,15 +105,6 @@ static inline void pud_clear(pud_t *pudp)
120 write_cr3(pgd); 105 write_cr3(pgd);
121} 106}
122 107
123#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
124
125#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
126
127
128/* Find an entry in the second-level page table.. */
129#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
130 pmd_index(address))
131
132#ifdef CONFIG_SMP 108#ifdef CONFIG_SMP
133static inline pte_t native_ptep_get_and_clear(pte_t *ptep) 109static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
134{ 110{
@@ -145,17 +121,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
145#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) 121#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
146#endif 122#endif
147 123
148#define __HAVE_ARCH_PTE_SAME
149static inline int pte_same(pte_t a, pte_t b)
150{
151 return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
152}
153
154static inline int pte_none(pte_t pte)
155{
156 return !pte.pte_low && !pte.pte_high;
157}
158
159/* 124/*
160 * Bits 0, 6 and 7 are taken in the low part of the pte, 125 * Bits 0, 6 and 7 are taken in the low part of the pte,
161 * put the 32 bits of offset into the high part. 126 * put the 32 bits of offset into the high part.
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 4f5af8447d54..8fef0f6bfbb6 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_X86_PGTABLE_H 1#ifndef _ASM_X86_PGTABLE_H
2#define _ASM_X86_PGTABLE_H 2#define _ASM_X86_PGTABLE_H
3 3
4#include <asm/page.h>
5
4#define FIRST_USER_ADDRESS 0 6#define FIRST_USER_ADDRESS 0
5 7
6#define _PAGE_BIT_PRESENT 0 /* is present */ 8#define _PAGE_BIT_PRESENT 0 /* is present */
@@ -236,68 +238,82 @@ static inline unsigned long pte_pfn(pte_t pte)
236 238
237static inline int pmd_large(pmd_t pte) 239static inline int pmd_large(pmd_t pte)
238{ 240{
239 return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == 241 return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
240 (_PAGE_PSE | _PAGE_PRESENT); 242 (_PAGE_PSE | _PAGE_PRESENT);
241} 243}
242 244
245static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
246{
247 pteval_t v = native_pte_val(pte);
248
249 return native_make_pte(v | set);
250}
251
252static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
253{
254 pteval_t v = native_pte_val(pte);
255
256 return native_make_pte(v & ~clear);
257}
258
243static inline pte_t pte_mkclean(pte_t pte) 259static inline pte_t pte_mkclean(pte_t pte)
244{ 260{
245 return __pte(pte_val(pte) & ~_PAGE_DIRTY); 261 return pte_clear_flags(pte, _PAGE_DIRTY);
246} 262}
247 263
248static inline pte_t pte_mkold(pte_t pte) 264static inline pte_t pte_mkold(pte_t pte)
249{ 265{
250 return __pte(pte_val(pte) & ~_PAGE_ACCESSED); 266 return pte_clear_flags(pte, _PAGE_ACCESSED);
251} 267}
252 268
253static inline pte_t pte_wrprotect(pte_t pte) 269static inline pte_t pte_wrprotect(pte_t pte)
254{ 270{
255 return __pte(pte_val(pte) & ~_PAGE_RW); 271 return pte_clear_flags(pte, _PAGE_RW);
256} 272}
257 273
258static inline pte_t pte_mkexec(pte_t pte) 274static inline pte_t pte_mkexec(pte_t pte)
259{ 275{
260 return __pte(pte_val(pte) & ~_PAGE_NX); 276 return pte_clear_flags(pte, _PAGE_NX);
261} 277}
262 278
263static inline pte_t pte_mkdirty(pte_t pte) 279static inline pte_t pte_mkdirty(pte_t pte)
264{ 280{
265 return __pte(pte_val(pte) | _PAGE_DIRTY); 281 return pte_set_flags(pte, _PAGE_DIRTY);
266} 282}
267 283
268static inline pte_t pte_mkyoung(pte_t pte) 284static inline pte_t pte_mkyoung(pte_t pte)
269{ 285{
270 return __pte(pte_val(pte) | _PAGE_ACCESSED); 286 return pte_set_flags(pte, _PAGE_ACCESSED);
271} 287}
272 288
273static inline pte_t pte_mkwrite(pte_t pte) 289static inline pte_t pte_mkwrite(pte_t pte)
274{ 290{
275 return __pte(pte_val(pte) | _PAGE_RW); 291 return pte_set_flags(pte, _PAGE_RW);
276} 292}
277 293
278static inline pte_t pte_mkhuge(pte_t pte) 294static inline pte_t pte_mkhuge(pte_t pte)
279{ 295{
280 return __pte(pte_val(pte) | _PAGE_PSE); 296 return pte_set_flags(pte, _PAGE_PSE);
281} 297}
282 298
283static inline pte_t pte_clrhuge(pte_t pte) 299static inline pte_t pte_clrhuge(pte_t pte)
284{ 300{
285 return __pte(pte_val(pte) & ~_PAGE_PSE); 301 return pte_clear_flags(pte, _PAGE_PSE);
286} 302}
287 303
288static inline pte_t pte_mkglobal(pte_t pte) 304static inline pte_t pte_mkglobal(pte_t pte)
289{ 305{
290 return __pte(pte_val(pte) | _PAGE_GLOBAL); 306 return pte_set_flags(pte, _PAGE_GLOBAL);
291} 307}
292 308
293static inline pte_t pte_clrglobal(pte_t pte) 309static inline pte_t pte_clrglobal(pte_t pte)
294{ 310{
295 return __pte(pte_val(pte) & ~_PAGE_GLOBAL); 311 return pte_clear_flags(pte, _PAGE_GLOBAL);
296} 312}
297 313
298static inline pte_t pte_mkspecial(pte_t pte) 314static inline pte_t pte_mkspecial(pte_t pte)
299{ 315{
300 return __pte(pte_val(pte) | _PAGE_SPECIAL); 316 return pte_set_flags(pte, _PAGE_SPECIAL);
301} 317}
302 318
303extern pteval_t __supported_pte_mask; 319extern pteval_t __supported_pte_mask;
@@ -451,6 +467,190 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
451# include "pgtable_64.h" 467# include "pgtable_64.h"
452#endif 468#endif
453 469
470#ifndef __ASSEMBLY__
471#include <linux/mm_types.h>
472
473static inline int pte_none(pte_t pte)
474{
475 return !pte.pte;
476}
477
478#define __HAVE_ARCH_PTE_SAME
479static inline int pte_same(pte_t a, pte_t b)
480{
481 return a.pte == b.pte;
482}
483
484static inline int pte_present(pte_t a)
485{
486 return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
487}
488
489static inline int pmd_present(pmd_t pmd)
490{
491 return pmd_flags(pmd) & _PAGE_PRESENT;
492}
493
494static inline int pmd_none(pmd_t pmd)
495{
496 /* Only check low word on 32-bit platforms, since it might be
497 out of sync with upper half. */
498 return (unsigned long)native_pmd_val(pmd) == 0;
499}
500
501static inline unsigned long pmd_page_vaddr(pmd_t pmd)
502{
503 return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
504}
505
506/*
507 * Currently stuck as a macro due to indirect forward reference to
508 * linux/mmzone.h's __section_mem_map_addr() definition:
509 */
510#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
511
512/*
513 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
514 *
515 * this macro returns the index of the entry in the pmd page which would
516 * control the given virtual address
517 */
518static inline unsigned pmd_index(unsigned long address)
519{
520 return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
521}
522
523/*
524 * Conversion functions: convert a page and protection to a page entry,
525 * and a page entry and page directory to the page they refer to.
526 *
527 * (Currently stuck as a macro because of indirect forward reference
528 * to linux/mm.h:page_to_nid())
529 */
530#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
531
532/*
533 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
534 *
535 * this function returns the index of the entry in the pte page which would
536 * control the given virtual address
537 */
538static inline unsigned pte_index(unsigned long address)
539{
540 return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
541}
542
543static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
544{
545 return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
546}
547
548static inline int pmd_bad(pmd_t pmd)
549{
550 return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
551}
552
553static inline unsigned long pages_to_mb(unsigned long npg)
554{
555 return npg >> (20 - PAGE_SHIFT);
556}
557
558#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
559 remap_pfn_range(vma, vaddr, pfn, size, prot)
560
561#if PAGETABLE_LEVELS == 2
562static inline int pud_large(pud_t pud)
563{
564 return 0;
565}
566#endif
567
568#if PAGETABLE_LEVELS > 2
569static inline int pud_none(pud_t pud)
570{
571 return native_pud_val(pud) == 0;
572}
573
574static inline int pud_present(pud_t pud)
575{
576 return pud_flags(pud) & _PAGE_PRESENT;
577}
578
579static inline unsigned long pud_page_vaddr(pud_t pud)
580{
581 return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
582}
583
584/*
585 * Currently stuck as a macro due to indirect forward reference to
586 * linux/mmzone.h's __section_mem_map_addr() definition:
587 */
588#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
589
590/* Find an entry in the second-level page table.. */
591static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
592{
593 return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
594}
595
596static inline unsigned long pmd_pfn(pmd_t pmd)
597{
598 return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
599}
600
601static inline int pud_large(pud_t pud)
602{
603 return (pud_flags(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
604 (_PAGE_PSE | _PAGE_PRESENT);
605}
606
607static inline int pud_bad(pud_t pud)
608{
609 return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
610}
611#endif /* PAGETABLE_LEVELS > 2 */
612
613#if PAGETABLE_LEVELS > 3
614static inline int pgd_present(pgd_t pgd)
615{
616 return pgd_flags(pgd) & _PAGE_PRESENT;
617}
618
619static inline unsigned long pgd_page_vaddr(pgd_t pgd)
620{
621 return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
622}
623
624/*
625 * Currently stuck as a macro due to indirect forward reference to
626 * linux/mmzone.h's __section_mem_map_addr() definition:
627 */
628#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
629
630/* to find an entry in a page-table-directory. */
631static inline unsigned pud_index(unsigned long address)
632{
633 return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
634}
635
636static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
637{
638 return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
639}
640
641static inline int pgd_bad(pgd_t pgd)
642{
643 return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
644}
645
646static inline int pgd_none(pgd_t pgd)
647{
648 return !native_pgd_val(pgd);
649}
650#endif /* PAGETABLE_LEVELS > 3 */
651
652#endif /* __ASSEMBLY__ */
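
With the level helpers now gathered in one header, a full software page-table walk can be written against them; a minimal sketch, assuming a 4-level build (folded levels would come from the generic nopud/nopmd headers) and leaving out huge-page handling:

	static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);	/* level 4 */
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return NULL;
		pud = pud_offset(pgd, addr);		/* level 3 */
		if (pud_none(*pud) || pud_bad(*pud))
			return NULL;
		pmd = pmd_offset(pud, addr);		/* level 2 */
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, addr);	/* level 1 */
	}
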
653
454/* 654/*
455 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD] 655
456 * 656 *
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 72b020deb46b..1952bb762aac 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -85,55 +85,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
85/* The boot page tables (all created as a single array) */ 85/* The boot page tables (all created as a single array) */
86extern unsigned long pg0[]; 86extern unsigned long pg0[];
87 87
88#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
89
90/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
91#define pmd_none(x) (!(unsigned long)pmd_val((x)))
92#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
93#define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
94
95#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
96
97#ifdef CONFIG_X86_PAE 88#ifdef CONFIG_X86_PAE
98# include <asm/pgtable-3level.h> 89# include <asm/pgtable-3level.h>
99#else 90#else
100# include <asm/pgtable-2level.h> 91# include <asm/pgtable-2level.h>
101#endif 92#endif
102 93
103/*
104 * Conversion functions: convert a page and protection to a page entry,
105 * and a page entry and page directory to the page they refer to.
106 */
107#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
108
109
110static inline int pud_large(pud_t pud) { return 0; }
111
112/*
113 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
114 *
115 * this macro returns the index of the entry in the pmd page which would
116 * control the given virtual address
117 */
118#define pmd_index(address) \
119 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
120
121/*
122 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
123 *
124 * this macro returns the index of the entry in the pte page which would
125 * control the given virtual address
126 */
127#define pte_index(address) \
128 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
129#define pte_offset_kernel(dir, address) \
130 ((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address)))
131
132#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
133
134#define pmd_page_vaddr(pmd) \
135 ((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))
136
137#if defined(CONFIG_HIGHPTE) 94#if defined(CONFIG_HIGHPTE)
138#define pte_offset_map(dir, address) \ 95#define pte_offset_map(dir, address) \
139 ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \ 96 ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \
@@ -176,7 +133,4 @@ do { \
176#define kern_addr_valid(kaddr) (0) 133#define kern_addr_valid(kaddr) (0)
177#endif 134#endif
178 135
179#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
180 remap_pfn_range(vma, vaddr, pfn, size, prot)
181
182#endif /* _ASM_X86_PGTABLE_32_H */ 136#endif /* _ASM_X86_PGTABLE_32_H */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index ba09289accaa..1c4e247c51fd 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -11,7 +11,6 @@
11#include <asm/processor.h> 11#include <asm/processor.h>
12#include <linux/bitops.h> 12#include <linux/bitops.h>
13#include <linux/threads.h> 13#include <linux/threads.h>
14#include <asm/pda.h>
15 14
16extern pud_t level3_kernel_pgt[512]; 15extern pud_t level3_kernel_pgt[512];
17extern pud_t level3_ident_pgt[512]; 16extern pud_t level3_ident_pgt[512];
@@ -67,9 +66,6 @@ extern void paging_init(void);
67 printk("%s:%d: bad pgd %p(%016lx).\n", \ 66 printk("%s:%d: bad pgd %p(%016lx).\n", \
68 __FILE__, __LINE__, &(e), pgd_val(e)) 67 __FILE__, __LINE__, &(e), pgd_val(e))
69 68
70#define pgd_none(x) (!pgd_val(x))
71#define pud_none(x) (!pud_val(x))
72
73struct mm_struct; 69struct mm_struct;
74 70
75void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte); 71void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
@@ -134,8 +130,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
134 native_set_pgd(pgd, native_make_pgd(0)); 130 native_set_pgd(pgd, native_make_pgd(0));
135} 131}
136 132
137#define pte_same(a, b) ((a).pte == (b).pte)
138
139#endif /* !__ASSEMBLY__ */ 133#endif /* !__ASSEMBLY__ */
140 134
141#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) 135#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
@@ -156,26 +150,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
156 150
157#ifndef __ASSEMBLY__ 151#ifndef __ASSEMBLY__
158 152
159static inline int pgd_bad(pgd_t pgd)
160{
161 return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
162}
163
164static inline int pud_bad(pud_t pud)
165{
166 return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
167}
168
169static inline int pmd_bad(pmd_t pmd)
170{
171 return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
172}
173
174#define pte_none(x) (!pte_val((x)))
175#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
176
177#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */
178
179/* 153/*
180 * Conversion functions: convert a page and protection to a page entry, 154 * Conversion functions: convert a page and protection to a page entry,
181 * and a page entry and page directory to the page they refer to. 155 * and a page entry and page directory to the page they refer to.
@@ -184,41 +158,12 @@ static inline int pmd_bad(pmd_t pmd)
184/* 158/*
185 * Level 4 access. 159 * Level 4 access.
186 */ 160 */
187#define pgd_page_vaddr(pgd) \
188 ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK))
189#define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
190#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
191static inline int pgd_large(pgd_t pgd) { return 0; } 161static inline int pgd_large(pgd_t pgd) { return 0; }
192#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE) 162#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
193 163
194/* PUD - Level3 access */ 164/* PUD - Level3 access */
195/* to find an entry in a page-table-directory. */
196#define pud_page_vaddr(pud) \
197 ((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
198#define pud_page(pud) (pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
199#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
200#define pud_offset(pgd, address) \
201 ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
202#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT)
203
204static inline int pud_large(pud_t pte)
205{
206 return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
207 (_PAGE_PSE | _PAGE_PRESENT);
208}
209 165
210/* PMD - Level 2 access */ 166/* PMD - Level 2 access */
211#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_PFN_MASK))
212#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
213
214#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
215#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
216 pmd_index(address))
217#define pmd_none(x) (!pmd_val((x)))
218#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
219#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
220#define pmd_pfn(x) ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
221
222#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) 167#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
223#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \ 168#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \
224 _PAGE_FILE }) 169 _PAGE_FILE })
@@ -226,13 +171,6 @@ static inline int pud_large(pud_t pte)
226 171
227/* PTE - Level 1 access. */ 172/* PTE - Level 1 access. */
228 173
229/* page, protection -> pte */
230#define mk_pte(page, pgprot) pfn_pte(page_to_pfn((page)), (pgprot))
231
232#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
233#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
234 pte_index((address)))
235
236/* x86-64 always has all page tables mapped. */ 174/* x86-64 always has all page tables mapped. */
237#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) 175#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
238#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address)) 176#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
@@ -266,9 +204,6 @@ extern int direct_gbpages;
266extern int kern_addr_valid(unsigned long addr); 204extern int kern_addr_valid(unsigned long addr);
267extern void cleanup_highmap(void); 205extern void cleanup_highmap(void);
268 206
269#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
270 remap_pfn_range(vma, vaddr, pfn, size, prot)
271
272#define HAVE_ARCH_UNMAPPED_AREA 207#define HAVE_ARCH_UNMAPPED_AREA
273#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 208#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
274 209
diff --git a/arch/x86/include/asm/prctl.h b/arch/x86/include/asm/prctl.h
index a8894647dd9a..3ac5032fae09 100644
--- a/arch/x86/include/asm/prctl.h
+++ b/arch/x86/include/asm/prctl.h
@@ -6,8 +6,4 @@
6#define ARCH_GET_FS 0x1003 6#define ARCH_GET_FS 0x1003
7#define ARCH_GET_GS 0x1004 7#define ARCH_GET_GS 0x1004
8 8
9#ifdef CONFIG_X86_64
10extern long sys_arch_prctl(int, unsigned long);
11#endif /* CONFIG_X86_64 */
12
13#endif /* _ASM_X86_PRCTL_H */ 9#endif /* _ASM_X86_PRCTL_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 3bfd5235a9eb..a0133838b67c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -73,7 +73,7 @@ struct cpuinfo_x86 {
73 char pad0; 73 char pad0;
74#else 74#else
75 /* Number of 4K pages in DTLB/ITLB combined (in pages): */ 75 /* Number of 4K pages in DTLB/ITLB combined (in pages): */
76 int x86_tlbsize; 76 int x86_tlbsize;
77 __u8 x86_virt_bits; 77 __u8 x86_virt_bits;
78 __u8 x86_phys_bits; 78 __u8 x86_phys_bits;
79#endif 79#endif
@@ -378,7 +378,29 @@ union thread_xstate {
378 378
379#ifdef CONFIG_X86_64 379#ifdef CONFIG_X86_64
380DECLARE_PER_CPU(struct orig_ist, orig_ist); 380DECLARE_PER_CPU(struct orig_ist, orig_ist);
381
382union irq_stack_union {
383 char irq_stack[IRQ_STACK_SIZE];
384 /*
385 * GCC hardcodes the stack canary as %gs:40. Since the
386 * irq_stack is the object at %gs:0, we reserve the bottom
387 * 48 bytes of the irq stack for the canary.
388 */
389 struct {
390 char gs_base[40];
391 unsigned long stack_canary;
392 };
393};
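
Since GCC hardcodes the %gs:40 offset, the layout above is load-bearing; a hypothetical compile-time assertion (not part of this patch) would pin it down:

	/* inside any function */
	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
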
394
395DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
396DECLARE_INIT_PER_CPU(irq_stack_union);
397
398DECLARE_PER_CPU(char *, irq_stack_ptr);
399#else /* X86_64 */
400#ifdef CONFIG_CC_STACKPROTECTOR
401DECLARE_PER_CPU(unsigned long, stack_canary);
381#endif 402#endif
403#endif /* X86_64 */
382 404
383extern void print_cpu_info(struct cpuinfo_x86 *); 405extern void print_cpu_info(struct cpuinfo_x86 *);
384extern unsigned int xstate_size; 406extern unsigned int xstate_size;
@@ -752,9 +774,9 @@ extern int sysenter_setup(void);
752extern struct desc_ptr early_gdt_descr; 774extern struct desc_ptr early_gdt_descr;
753 775
754extern void cpu_set_gdt(int); 776extern void cpu_set_gdt(int);
755extern void switch_to_new_gdt(void); 777extern void switch_to_new_gdt(int);
778extern void load_percpu_segment(int);
756extern void cpu_init(void); 779extern void cpu_init(void);
757extern void init_gdt(int cpu);
758 780
759static inline unsigned long get_debugctlmsr(void) 781static inline unsigned long get_debugctlmsr(void)
760{ 782{
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index d6a22f92ba77..49fb3ecf3bb3 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -18,11 +18,7 @@ extern void syscall32_cpu_init(void);
18 18
19extern void check_efer(void); 19extern void check_efer(void);
20 20
21#ifdef CONFIG_X86_BIOS_REBOOT
22extern int reboot_force; 21extern int reboot_force;
23#else
24static const int reboot_force = 0;
25#endif
26 22
27long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); 23long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
28 24
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 6d34d954c228..e304b66abeea 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -28,7 +28,7 @@ struct pt_regs {
28 int xds; 28 int xds;
29 int xes; 29 int xes;
30 int xfs; 30 int xfs;
31 /* int gs; */ 31 int xgs;
32 long orig_eax; 32 long orig_eax;
33 long eip; 33 long eip;
34 int xcs; 34 int xcs;
@@ -50,7 +50,7 @@ struct pt_regs {
50 unsigned long ds; 50 unsigned long ds;
51 unsigned long es; 51 unsigned long es;
52 unsigned long fs; 52 unsigned long fs;
53 /* int gs; */ 53 unsigned long gs;
54 unsigned long orig_ax; 54 unsigned long orig_ax;
55 unsigned long ip; 55 unsigned long ip;
56 unsigned long cs; 56 unsigned long cs;
diff --git a/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h b/arch/x86/include/asm/rdc321x_defs.h
index c8e9c8bed3d0..c8e9c8bed3d0 100644
--- a/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h
+++ b/arch/x86/include/asm/rdc321x_defs.h
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 1dc1b51ac623..14e0ed86a6f9 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -61,7 +61,7 @@
61 * 61 *
62 * 26 - ESPFIX small SS 62 * 26 - ESPFIX small SS
63 * 27 - per-cpu [ offset to per-cpu data area ] 63 * 27 - per-cpu [ offset to per-cpu data area ]
64 * 28 - unused 64 * 28 - stack_canary-20 [ for stack protector ]
65 * 29 - unused 65 * 29 - unused
66 * 30 - unused 66 * 30 - unused
67 * 31 - TSS for double fault handler 67 * 31 - TSS for double fault handler
@@ -95,6 +95,13 @@
95#define __KERNEL_PERCPU 0 95#define __KERNEL_PERCPU 0
96#endif 96#endif
97 97
98#define GDT_ENTRY_STACK_CANARY (GDT_ENTRY_KERNEL_BASE + 16)
99#ifdef CONFIG_CC_STACKPROTECTOR
100#define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY * 8)
101#else
102#define __KERNEL_STACK_CANARY 0
103#endif
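
The descriptor's base is set to this CPU's stack_canary minus 20 (see the "stack_canary-20" note in the GDT layout comment above), so once %gs is loaded with __KERNEL_STACK_CANARY, GCC's fixed 32-bit canary access lands on the per-cpu variable; roughly (sketch, the actual access is compiler-generated):

	movl %gs:20, %eax	/* reads this CPU's stack_canary */
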
104
98#define GDT_ENTRY_DOUBLEFAULT_TSS 31 105#define GDT_ENTRY_DOUBLEFAULT_TSS 31
99 106
100/* 107/*
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ebe858cdc8a3..45b40278b582 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_X86_SETUP_H 1#ifndef _ASM_X86_SETUP_H
2#define _ASM_X86_SETUP_H 2#define _ASM_X86_SETUP_H
3 3
4#ifdef __KERNEL__
5
4#define COMMAND_LINE_SIZE 2048 6#define COMMAND_LINE_SIZE 2048
5 7
6#ifndef __ASSEMBLY__ 8#ifndef __ASSEMBLY__
@@ -8,10 +10,8 @@
8/* Interrupt control for vSMPowered x86_64 systems */ 10/* Interrupt control for vSMPowered x86_64 systems */
9void vsmp_init(void); 11void vsmp_init(void);
10 12
11
12void setup_bios_corruption_check(void); 13void setup_bios_corruption_check(void);
13 14
14
15#ifdef CONFIG_X86_VISWS 15#ifdef CONFIG_X86_VISWS
16extern void visws_early_detect(void); 16extern void visws_early_detect(void);
17extern int is_visws_box(void); 17extern int is_visws_box(void);
@@ -43,7 +43,7 @@ struct x86_quirks {
43 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); 43 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
44 void (*mpc_oem_pci_bus)(struct mpc_bus *m); 44 void (*mpc_oem_pci_bus)(struct mpc_bus *m);
45 void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable, 45 void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
46 unsigned short oemsize); 46 unsigned short oemsize);
47 int (*setup_ioapic_ids)(void); 47 int (*setup_ioapic_ids)(void);
48 int (*update_genapic)(void); 48 int (*update_genapic)(void);
49}; 49};
@@ -56,8 +56,6 @@ extern unsigned long saved_video_mode;
56#endif 56#endif
57#endif /* __ASSEMBLY__ */ 57#endif /* __ASSEMBLY__ */
58 58
59#ifdef __KERNEL__
60
61#ifdef __i386__ 59#ifdef __i386__
62 60
63#include <linux/pfn.h> 61#include <linux/pfn.h>
@@ -100,7 +98,6 @@ extern unsigned long init_pg_tables_start;
100extern unsigned long init_pg_tables_end; 98extern unsigned long init_pg_tables_end;
101 99
102#else 100#else
103void __init x86_64_init_pda(void);
104void __init x86_64_start_kernel(char *real_mode); 101void __init x86_64_start_kernel(char *real_mode);
105void __init x86_64_start_reservations(char *real_mode_data); 102void __init x86_64_start_reservations(char *real_mode_data);
106 103
diff --git a/arch/x86/include/asm/mach-default/setup_arch.h b/arch/x86/include/asm/setup_arch.h
index 38846208b548..38846208b548 100644
--- a/arch/x86/include/asm/mach-default/setup_arch.h
+++ b/arch/x86/include/asm/setup_arch.h
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 19953df61c52..47d0e21f2b9e 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -15,34 +15,8 @@
 # include <asm/io_apic.h>
 # endif
 #endif
-#include <asm/pda.h>
 #include <asm/thread_info.h>
-
-#ifdef CONFIG_X86_64
-
-extern cpumask_var_t cpu_callin_mask;
-extern cpumask_var_t cpu_callout_mask;
-extern cpumask_var_t cpu_initialized_mask;
-extern cpumask_var_t cpu_sibling_setup_mask;
-
-#else /* CONFIG_X86_32 */
-
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_sibling_setup_map;
-
-#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
-#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
-#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
-#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
-
-#endif /* CONFIG_X86_32 */
-
-extern void (*mtrr_hook)(void);
-extern void zap_low_mappings(void);
-
-extern int __cpuinit get_local_pda(int cpu);
+#include <asm/cpumask.h>
 
 extern int smp_num_siblings;
 extern unsigned int num_processors;
@@ -50,9 +24,7 @@ extern unsigned int num_processors;
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
-#ifdef CONFIG_X86_32
 DECLARE_PER_CPU(int, cpu_number);
-#endif
 
 static inline struct cpumask *cpu_sibling_mask(int cpu)
 {
@@ -167,8 +139,6 @@ void play_dead_common(void);
 void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
 
-extern void prefill_possible_map(void);
-
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
 
@@ -177,10 +147,6 @@ static inline int num_booting_cpus(void)
 {
 	return cpumask_weight(cpu_callout_mask);
 }
-#else
-static inline void prefill_possible_map(void)
-{
-}
 #endif /* CONFIG_SMP */
 
 extern unsigned disabled_cpus __cpuinitdata;
@@ -191,11 +157,11 @@ extern unsigned disabled_cpus __cpuinitdata;
  * from the initial startup. We map APIC_BASE very early in page_setup(),
  * so this is correct in the x86 case.
  */
-#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
+#define raw_smp_processor_id() (percpu_read(cpu_number))
 extern int safe_smp_processor_id(void);
 
 #elif defined(CONFIG_X86_64_SMP)
-#define raw_smp_processor_id() read_pda(cpunumber)
+#define raw_smp_processor_id() (percpu_read(cpu_number))
 
 #define stack_smp_processor_id() \
 ({ \
@@ -205,10 +171,6 @@ extern int safe_smp_processor_id(void);
 })
 #define safe_smp_processor_id() smp_processor_id()
 
-#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
-#define cpu_physical_id(cpu) boot_cpu_physical_apicid
-#define safe_smp_processor_id() 0
-#define stack_smp_processor_id() 0
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -220,28 +182,9 @@ static inline int logical_smp_processor_id(void)
 	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
 }
 
-#include <mach_apicdef.h>
-static inline unsigned int read_apic_id(void)
-{
-	unsigned int reg;
-
-	reg = *(u32 *)(APIC_BASE + APIC_ID);
-
-	return GET_APIC_ID(reg);
-}
 #endif
 
-
-# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
 extern int hard_smp_processor_id(void);
-# else
-#include <mach_apicdef.h>
-static inline int hard_smp_processor_id(void)
-{
-	/* we don't want to mark this access volatile - bad code generation */
-	return read_apic_id();
-}
-# endif /* APIC_DEFINITION */
 
 #else /* CONFIG_X86_LOCAL_APIC */
 
@@ -251,11 +194,5 @@ static inline int hard_smp_processor_id(void)
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
-#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
-extern unsigned char boot_cpu_id;
-#else
-#define boot_cpu_id 0
-#endif
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_SMP_H */
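
With the PDA gone, both the 32-bit and 64-bit branches above read the CPU number through the same percpu_read(cpu_number) accessor. A minimal userspace sketch of the idea, using compiler thread-local storage in place of the kernel's segment-based per-CPU area (the variable, thread count, and output are illustrative, not from this patch):

#include <pthread.h>
#include <stdio.h>

/* Stand-in for a per-CPU variable: one instance per thread, reached
 * through the same accessor everywhere, like percpu_read(cpu_number). */
static __thread int cpu_number;

static void *worker(void *arg)
{
	cpu_number = (int)(long)arg;	/* set once, like CPU bring-up */
	printf("raw_smp_processor_id() analogue -> %d\n", cpu_number);
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	for (long i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}

(Build with -lpthread; each thread sees its own cpu_number, just as each CPU sees its own per-CPU slot.)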
diff --git a/arch/x86/include/asm/mach-default/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 23bf52103b89..1def60114906 100644
--- a/arch/x86/include/asm/mach-default/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -13,10 +13,10 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
 	CMOS_WRITE(0xa, 0xf);
 	local_flush_tlb();
 	pr_debug("1.\n");
-	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
+	*((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) =
 		start_eip >> 4;
 	pr_debug("2.\n");
-	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
+	*((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_low)) =
 		start_eip & 0xf;
 	pr_debug("3.\n");
 }
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
 	 */
 	CMOS_WRITE(0, 0xf);
 
-	*((volatile long *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
+	*((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
 }
 
 static inline void __init smpboot_setup_io_apic(void)
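
The warm-reset vector stores the trampoline entry point as a real-mode segment:offset pair, which is why start_eip is split into start_eip >> 4 and start_eip & 0xf above. A self-contained sketch of that split (the sample address is illustrative; 0x6000 happens to match TRAMPOLINE_BASE elsewhere in this series):

#include <stdio.h>

int main(void)
{
	unsigned long start_eip = 0x6000;	/* illustrative trampoline address */
	unsigned short seg = start_eip >> 4;	/* real-mode segment */
	unsigned short off = start_eip & 0xf;	/* real-mode offset */

	/* A real-mode CPU computes segment * 16 + offset, so the pair
	 * reconstructs the original linear address. */
	printf("%#lx -> %04x:%04x -> %#lx\n",
	       start_eip, seg, off, (unsigned long)seg * 16 + off);
	return 0;
}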
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 8247e94ac6b1..3a5696656680 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -172,70 +172,8 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
 }
 
-#ifdef CONFIG_PARAVIRT
-/*
- * Define virtualization-friendly old-style lock byte lock, for use in
- * pv_lock_ops if desired.
- *
- * This differs from the pre-2.6.24 spinlock by always using xchgb
- * rather than decb to take the lock; this allows it to use a
- * zero-initialized lock structure. It also maintains a 1-byte
- * contention counter, so that we can implement
- * __byte_spin_is_contended.
- */
-struct __byte_spinlock {
-	s8 lock;
-	s8 spinners;
-};
-
-static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	return bl->lock != 0;
-}
-
-static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	return bl->spinners != 0;
-}
-
-static inline void __byte_spin_lock(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	s8 val = 1;
-
-	asm("1: xchgb %1, %0\n"
-	    " test %1,%1\n"
-	    " jz 3f\n"
-	    " " LOCK_PREFIX "incb %2\n"
-	    "2: rep;nop\n"
-	    " cmpb $1, %0\n"
-	    " je 2b\n"
-	    " " LOCK_PREFIX "decb %2\n"
-	    " jmp 1b\n"
-	    "3:"
-	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
-}
-
-static inline int __byte_spin_trylock(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	u8 old = 1;
-
-	asm("xchgb %1,%0"
-	    : "+m" (bl->lock), "+q" (old) : : "memory");
-
-	return old == 0;
-}
-
-static inline void __byte_spin_unlock(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	smp_wmb();
-	bl->lock = 0;
-}
-#else /* !CONFIG_PARAVIRT */
+#ifndef CONFIG_PARAVIRT
+
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
@@ -268,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 	__raw_spin_lock(lock);
 }
 
-#endif /* CONFIG_PARAVIRT */
+#endif
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
@@ -330,8 +268,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 
-	atomic_dec(count);
-	if (atomic_read(count) >= 0)
+	if (atomic_dec_return(count) >= 0)
 		return 1;
 	atomic_inc(count);
 	return 0;
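
The old sequence decremented the counter and re-read it as a second operation, so another CPU could change the count between the two steps and the reader could act on a stale value; atomic_dec_return() makes the decrement and the value it tests a single atomic step. A minimal userspace sketch of the same idiom with C11 atomics (all names here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int count = 64;	/* reader slots, like an rwlock's count */

static bool read_trylock(void)
{
	/* One atomic op: the value we test is exactly the value our own
	 * decrement produced, so no other thread can slip in between
	 * "decrement" and "check". fetch_sub returns the old value, so
	 * old - 1 is what atomic_dec_return() would yield. */
	if (atomic_fetch_sub(&count, 1) - 1 >= 0)
		return true;
	atomic_fetch_add(&count, 1);	/* undo: lock is write-held */
	return false;
}

int main(void)
{
	printf("trylock: %s\n", read_trylock() ? "acquired" : "busy");
	return 0;
}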
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
new file mode 100644
index 000000000000..c2d742c6e15f
--- /dev/null
+++ b/arch/x86/include/asm/stackprotector.h
@@ -0,0 +1,124 @@
+/*
+ * GCC stack protector support.
+ *
+ * Stack protector works by putting predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function. The pattern is called stack canary
+ * and unfortunately gcc requires it to be at a fixed offset from %gs.
+ * On x86_64, the offset is 40 bytes and on x86_32 20 bytes. x86_64
+ * and x86_32 use segment registers differently and thus handles this
+ * requirement differently.
+ *
+ * On x86_64, %gs is shared by percpu area and stack canary. All
+ * percpu symbols are zero based and %gs points to the base of percpu
+ * area. The first occupant of the percpu area is always
+ * irq_stack_union which contains stack_canary at offset 40. Userland
+ * %gs is always saved and restored on kernel entry and exit using
+ * swapgs, so stack protector doesn't add any complexity there.
+ *
+ * On x86_32, it's slightly more complicated. As in x86_64, %gs is
+ * used for userland TLS. Unfortunately, some processors are much
+ * slower at loading segment registers with different value when
+ * entering and leaving the kernel, so the kernel uses %fs for percpu
+ * area and manages %gs lazily so that %gs is switched only when
+ * necessary, usually during task switch.
+ *
+ * As gcc requires the stack canary at %gs:20, %gs can't be managed
+ * lazily if stack protector is enabled, so the kernel saves and
+ * restores userland %gs on kernel entry and exit. This behavior is
+ * controlled by CONFIG_X86_32_LAZY_GS and accessors are defined in
+ * system.h to hide the details.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+
+#include <asm/tsc.h>
+#include <asm/processor.h>
+#include <asm/percpu.h>
+#include <asm/system.h>
+#include <asm/desc.h>
+#include <linux/random.h>
+
+/*
+ * 24 byte read-only segment initializer for stack canary. Linker
+ * can't handle the address bit shifting. Address will be set in
+ * head_32 for boot CPU and setup_per_cpu_areas() for others.
+ */
+#define GDT_STACK_CANARY_INIT \
+	[GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } },
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+	u64 canary;
+	u64 tsc;
+
+#ifdef CONFIG_X86_64
+	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
+#endif
+	/*
+	 * We both use the random pool and the current TSC as a source
+	 * of randomness. The TSC only matters for very early init,
+	 * there it already has some randomness on most systems. Later
+	 * on during the bootup the random pool has true entropy too.
+	 */
+	get_random_bytes(&canary, sizeof(canary));
+	tsc = __native_read_tsc();
+	canary += tsc + (tsc << 32UL);
+
+	current->stack_canary = canary;
+#ifdef CONFIG_X86_64
+	percpu_write(irq_stack_union.stack_canary, canary);
+#else
+	percpu_write(stack_canary, canary);
+#endif
+}
+
+static inline void setup_stack_canary_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+	unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
+	struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
+	struct desc_struct desc;
+
+	desc = gdt_table[GDT_ENTRY_STACK_CANARY];
+	desc.base0 = canary & 0xffff;
+	desc.base1 = (canary >> 16) & 0xff;
+	desc.base2 = (canary >> 24) & 0xff;
+	write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
+#endif
+}
+
+static inline void load_stack_canary_segment(void)
+{
+#ifdef CONFIG_X86_32
+	asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory");
+#endif
+}
+
+#else /* CC_STACKPROTECTOR */
+
+#define GDT_STACK_CANARY_INIT
+
+/* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */
+
+static inline void setup_stack_canary_segment(int cpu)
+{ }
+
+static inline void load_stack_canary_segment(void)
+{
+#ifdef CONFIG_X86_32
+	asm volatile ("mov %0, %%gs" : : "r" (0));
+#endif
+}
+
+#endif /* CC_STACKPROTECTOR */
+#endif /* _ASM_STACKPROTECTOR_H */
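
For flavor, a userspace sketch of the canary seeding above: mix bytes from the system entropy pool with the time-stamp counter, as boot_init_stack_canary() does. getrandom() and __rdtsc() are the userspace stand-ins (assumed available on Linux/x86 with a reasonably recent glibc); this is illustrative, not kernel code.

#include <stdio.h>
#include <stdint.h>
#include <sys/random.h>	/* getrandom() -- assumption: glibc >= 2.25 */
#include <x86intrin.h>	/* __rdtsc() */

int main(void)
{
	uint64_t canary, tsc;

	if (getrandom(&canary, sizeof(canary), 0) != sizeof(canary))
		canary = 0;	/* fall back to TSC-only entropy */
	tsc = __rdtsc();
	canary += tsc + (tsc << 32);	/* same mixing step as the patch */
	printf("canary = %#018llx\n", (unsigned long long)canary);
	return 0;
}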
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
deleted file mode 100644
index 93d2c8667cfe..000000000000
--- a/arch/x86/include/asm/summit/apic.h
+++ /dev/null
@@ -1,202 +0,0 @@
-#ifndef __ASM_SUMMIT_APIC_H
-#define __ASM_SUMMIT_APIC_H
-
-#include <asm/smp.h>
-#include <linux/gfp.h>
-
-#define esr_disable (1)
-#define NO_BALANCE_IRQ (0)
-
-/* In clustered mode, the high nibble of APIC ID is a cluster number.
- * The low nibble is a 4-bit bitmap. */
-#define XAPIC_DEST_CPUS_SHIFT 4
-#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
-#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
-
-#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
-
-static inline const cpumask_t *target_cpus(void)
-{
-	/* CPU_MASK_ALL (0xff) has undefined behaviour with
-	 * dest_LowestPrio mode logical clustered apic interrupt routing
-	 * Just start on cpu 0. IRQ balancing will spread load
-	 */
-	return &cpumask_of_cpu(0);
-}
-
-#define INT_DELIVERY_MODE (dest_LowestPrio)
-#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-	return 0;
-}
-
-/* we don't use the phys_cpu_present_map to indicate apicid presence */
-static inline unsigned long check_apicid_present(int bit)
-{
-	return 1;
-}
-
-#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
-
-extern u8 cpu_2_logical_apicid[];
-
-static inline void init_apic_ldr(void)
-{
-	unsigned long val, id;
-	int count = 0;
-	u8 my_id = (u8)hard_smp_processor_id();
-	u8 my_cluster = (u8)apicid_cluster(my_id);
-#ifdef CONFIG_SMP
-	u8 lid;
-	int i;
-
-	/* Create logical APIC IDs by counting CPUs already in cluster. */
-	for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
-		lid = cpu_2_logical_apicid[i];
-		if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
-			++count;
-	}
-#endif
-	/* We only have a 4 wide bitmap in cluster mode. If a deranged
-	 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
-	BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
-	id = my_cluster | (1UL << count);
-	apic_write(APIC_DFR, APIC_DFR_VALUE);
-	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-	val |= SET_APIC_LOGICAL_ID(id);
-	apic_write(APIC_LDR, val);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
-	return 0;
-}
-
-static inline int apic_id_registered(void)
-{
-	return 1;
-}
-
-static inline void setup_apic_routing(void)
-{
-	printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
-	       nr_ioapics);
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-#ifdef CONFIG_SMP
-	return apicid_2_node[hard_smp_processor_id()];
-#else
-	return 0;
-#endif
-}
-
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
-	if (cpu >= nr_cpu_ids)
-		return BAD_APICID;
-	return (int)cpu_2_logical_apicid[cpu];
-#else
-	return logical_smp_processor_id();
-#endif
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-	if (mps_cpu < nr_cpu_ids)
-		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
-	else
-		return BAD_APICID;
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
-{
-	/* For clustered we don't have a good way to do this yet - hack */
-	return physids_promote(0x0F);
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int apicid)
-{
-	return physid_mask_of_physid(0);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
-	return 1;
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
-	int num_bits_set;
-	int cpus_found = 0;
-	int cpu;
-	int apicid;
-
-	num_bits_set = cpus_weight(*cpumask);
-	/* Return id to all */
-	if (num_bits_set >= nr_cpu_ids)
-		return (int) 0xFF;
-	/*
-	 * The cpus in the mask must all be on the apic cluster. If are not
-	 * on the same apicid cluster return default value of TARGET_CPUS.
-	 */
-	cpu = first_cpu(*cpumask);
-	apicid = cpu_to_logical_apicid(cpu);
-	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, *cpumask)) {
-			int new_apicid = cpu_to_logical_apicid(cpu);
-			if (apicid_cluster(apicid) !=
-					apicid_cluster(new_apicid)){
-				printk ("%s: Not a valid mask!\n", __func__);
-				return 0xFF;
-			}
-			apicid = apicid | new_apicid;
-			cpus_found++;
-		}
-		cpu++;
-	}
-	return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
-						  const struct cpumask *andmask)
-{
-	int apicid = cpu_to_logical_apicid(0);
-	cpumask_var_t cpumask;
-
-	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-		return apicid;
-
-	cpumask_and(cpumask, inmask, andmask);
-	cpumask_and(cpumask, cpumask, cpu_online_mask);
-	apicid = cpu_mask_to_apicid(cpumask);
-
-	free_cpumask_var(cpumask);
-	return apicid;
-}
-
-/* cpuid returns the value latched in the HW at reset, not the APIC ID
- * register's value. For any box whose BIOS changes APIC IDs, like
- * clustered APIC systems, we must use hard_smp_processor_id.
- *
- * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
- */
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-	return hard_smp_processor_id() >> index_msb;
-}
-
-#endif /* __ASM_SUMMIT_APIC_H */
diff --git a/arch/x86/include/asm/summit/apicdef.h b/arch/x86/include/asm/summit/apicdef.h
deleted file mode 100644
index f3fbca1f61c1..000000000000
--- a/arch/x86/include/asm/summit/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_SUMMIT_APICDEF_H
-#define __ASM_SUMMIT_APICDEF_H
-
-#define APIC_ID_MASK (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
-	return (x>>24)&0xFF;
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h
deleted file mode 100644
index a8a2c24f50cc..000000000000
--- a/arch/x86/include/asm/summit/ipi.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __ASM_SUMMIT_IPI_H
-#define __ASM_SUMMIT_IPI_H
-
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
-
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
-{
-	send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-
-	if (!cpus_empty(mask))
-		send_IPI_mask(&mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
-	send_IPI_mask(&cpu_online_map, vector);
-}
-
-#endif /* __ASM_SUMMIT_IPI_H */
diff --git a/arch/x86/include/asm/summit/mpparse.h b/arch/x86/include/asm/summit/mpparse.h
deleted file mode 100644
index 380e86c02363..000000000000
--- a/arch/x86/include/asm/summit/mpparse.h
+++ /dev/null
@@ -1,109 +0,0 @@
-#ifndef __ASM_SUMMIT_MPPARSE_H
-#define __ASM_SUMMIT_MPPARSE_H
-
-#include <asm/tsc.h>
-
-extern int use_cyclone;
-
-#ifdef CONFIG_X86_SUMMIT_NUMA
-extern void setup_summit(void);
-#else
-#define setup_summit() {}
-#endif
-
-static inline int mps_oem_check(struct mpc_table *mpc, char *oem,
-				char *productid)
-{
-	if (!strncmp(oem, "IBM ENSW", 8) &&
-	    (!strncmp(productid, "VIGIL SMP", 9)
-	     || !strncmp(productid, "EXA", 3)
-	     || !strncmp(productid, "RUTHLESS SMP", 12))){
-		mark_tsc_unstable("Summit based system");
-		use_cyclone = 1; /*enable cyclone-timer*/
-		setup_summit();
-		return 1;
-	}
-	return 0;
-}
-
-/* Hook from generic ACPI tables.c */
-static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-	if (!strncmp(oem_id, "IBM", 3) &&
-	    (!strncmp(oem_table_id, "SERVIGIL", 8)
-	     || !strncmp(oem_table_id, "EXA", 3))){
-		mark_tsc_unstable("Summit based system");
-		use_cyclone = 1; /*enable cyclone-timer*/
-		setup_summit();
-		return 1;
-	}
-	return 0;
-}
-
-struct rio_table_hdr {
-	unsigned char version;      /* Version number of this data structure */
-	                            /* Version 3 adds chassis_num & WP_index */
-	unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */
-	unsigned char num_rio_dev;  /* # of RIO I/O devices (Cyclones and Winnipegs) */
-} __attribute__((packed));
-
-struct scal_detail {
-	unsigned char node_id;      /* Scalability Node ID */
-	unsigned long CBAR;         /* Address of 1MB register space */
-	unsigned char port0node;    /* Node ID port connected to: 0xFF=None */
-	unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
-	unsigned char port1node;    /* Node ID port connected to: 0xFF = None */
-	unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
-	unsigned char port2node;    /* Node ID port connected to: 0xFF = None */
-	unsigned char port2port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
-	unsigned char chassis_num;  /* 1 based Chassis number (1 = boot node) */
-} __attribute__((packed));
-
-struct rio_detail {
-	unsigned char node_id;      /* RIO Node ID */
-	unsigned long BBAR;         /* Address of 1MB register space */
-	unsigned char type;         /* Type of device */
-	unsigned char owner_id;     /* For WPEG: Node ID of Cyclone that owns this WPEG*/
-	                            /* For CYC: Node ID of Twister that owns this CYC */
-	unsigned char port0node;    /* Node ID port connected to: 0xFF=None */
-	unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
-	unsigned char port1node;    /* Node ID port connected to: 0xFF=None */
-	unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
-	unsigned char first_slot;   /* For WPEG: Lowest slot number below this WPEG */
-	                            /* For CYC: 0 */
-	unsigned char status;       /* For WPEG: Bit 0 = 1 : the XAPIC is used */
-	                            /* = 0 : the XAPIC is not used, ie:*/
-	                            /* ints fwded to another XAPIC */
-	                            /* Bits1:7 Reserved */
-	                            /* For CYC: Bits0:7 Reserved */
-	unsigned char WP_index;     /* For WPEG: WPEG instance index - lower ones have */
-	                            /* lower slot numbers/PCI bus numbers */
-	                            /* For CYC: No meaning */
-	unsigned char chassis_num;  /* 1 based Chassis number */
-	                            /* For LookOut WPEGs this field indicates the */
-	                            /* Expansion Chassis #, enumerated from Boot */
-	                            /* Node WPEG external port, then Boot Node CYC */
-	                            /* external port, then Next Vigil chassis WPEG */
-	                            /* external port, etc. */
-	                            /* Shared Lookouts have only 1 chassis number (the */
-	                            /* first one assigned) */
-} __attribute__((packed));
-
-
-typedef enum {
-	CompatTwister = 0,  /* Compatibility Twister */
-	AltTwister    = 1,  /* Alternate Twister of internal 8-way */
-	CompatCyclone = 2,  /* Compatibility Cyclone */
-	AltCyclone    = 3,  /* Alternate Cyclone of internal 8-way */
-	CompatWPEG    = 4,  /* Compatibility WPEG */
-	AltWPEG       = 5,  /* Second Planar WPEG */
-	LookOutAWPEG  = 6,  /* LookOut WPEG */
-	LookOutBWPEG  = 7,  /* LookOut WPEG */
-} node_type;
-
-static inline int is_WPEG(struct rio_detail *rio){
-	return (rio->type == CompatWPEG || rio->type == AltWPEG ||
-		rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
-}
-
-#endif /* __ASM_SUMMIT_MPPARSE_H */
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index c0b0bda754ee..68b1be10cfad 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -29,21 +29,21 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *);
 /* X86_32 only */
 #ifdef CONFIG_X86_32
 /* kernel/process_32.c */
-asmlinkage int sys_fork(struct pt_regs);
-asmlinkage int sys_clone(struct pt_regs);
-asmlinkage int sys_vfork(struct pt_regs);
-asmlinkage int sys_execve(struct pt_regs);
+int sys_fork(struct pt_regs *);
+int sys_clone(struct pt_regs *);
+int sys_vfork(struct pt_regs *);
+int sys_execve(struct pt_regs *);
 
 /* kernel/signal_32.c */
 asmlinkage int sys_sigsuspend(int, int, old_sigset_t);
 asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
 			     struct old_sigaction __user *);
-asmlinkage int sys_sigaltstack(unsigned long);
-asmlinkage unsigned long sys_sigreturn(unsigned long);
-asmlinkage int sys_rt_sigreturn(unsigned long);
+int sys_sigaltstack(struct pt_regs *);
+unsigned long sys_sigreturn(struct pt_regs *);
+long sys_rt_sigreturn(struct pt_regs *);
 
 /* kernel/ioport.c */
-asmlinkage long sys_iopl(unsigned long);
+long sys_iopl(struct pt_regs *);
 
 /* kernel/sys_i386_32.c */
 asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
@@ -59,8 +59,8 @@ struct oldold_utsname;
 asmlinkage int sys_olduname(struct oldold_utsname __user *);
 
 /* kernel/vm86_32.c */
-asmlinkage int sys_vm86old(struct pt_regs);
-asmlinkage int sys_vm86(struct pt_regs);
+int sys_vm86old(struct pt_regs *);
+int sys_vm86(struct pt_regs *);
 
 #else /* CONFIG_X86_32 */
 
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 8e626ea33a1a..c00bfdbdd456 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -23,6 +23,20 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 #ifdef CONFIG_X86_32
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary \
+	"movl %P[task_canary](%[next]), %%ebx\n\t" \
+	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
+#define __switch_canary_oparam \
+	, [stack_canary] "=m" (per_cpu_var(stack_canary))
+#define __switch_canary_iparam \
+	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else /* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif /* CC_STACKPROTECTOR */
+
 /*
  * Saving eflags is important. It switches not only IOPL between tasks,
  * it also protects other tasks from NT leaking through sysenter etc.
@@ -44,6 +58,7 @@ do { \
44 "movl %[next_sp],%%esp\n\t" /* restore ESP */ \ 58 "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
45 "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ 59 "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
46 "pushl %[next_ip]\n\t" /* restore EIP */ \ 60 "pushl %[next_ip]\n\t" /* restore EIP */ \
61 __switch_canary \
47 "jmp __switch_to\n" /* regparm call */ \ 62 "jmp __switch_to\n" /* regparm call */ \
48 "1:\t" \ 63 "1:\t" \
49 "popl %%ebp\n\t" /* restore EBP */ \ 64 "popl %%ebp\n\t" /* restore EBP */ \
@@ -58,6 +73,8 @@ do { \
58 "=b" (ebx), "=c" (ecx), "=d" (edx), \ 73 "=b" (ebx), "=c" (ecx), "=d" (edx), \
59 "=S" (esi), "=D" (edi) \ 74 "=S" (esi), "=D" (edi) \
60 \ 75 \
76 __switch_canary_oparam \
77 \
61 /* input parameters: */ \ 78 /* input parameters: */ \
62 : [next_sp] "m" (next->thread.sp), \ 79 : [next_sp] "m" (next->thread.sp), \
63 [next_ip] "m" (next->thread.ip), \ 80 [next_ip] "m" (next->thread.ip), \
@@ -66,6 +83,8 @@ do { \
66 [prev] "a" (prev), \ 83 [prev] "a" (prev), \
67 [next] "d" (next) \ 84 [next] "d" (next) \
68 \ 85 \
86 __switch_canary_iparam \
87 \
69 : /* reloaded segment registers */ \ 88 : /* reloaded segment registers */ \
70 "memory"); \ 89 "memory"); \
71} while (0) 90} while (0)
@@ -86,27 +105,44 @@ do { \
86 , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ 105 , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
87 "r12", "r13", "r14", "r15" 106 "r12", "r13", "r14", "r15"
88 107
108#ifdef CONFIG_CC_STACKPROTECTOR
109#define __switch_canary \
110 "movq %P[task_canary](%%rsi),%%r8\n\t" \
111 "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
112#define __switch_canary_oparam \
113 , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
114#define __switch_canary_iparam \
115 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
116#else /* CC_STACKPROTECTOR */
117#define __switch_canary
118#define __switch_canary_oparam
119#define __switch_canary_iparam
120#endif /* CC_STACKPROTECTOR */
121
89/* Save restore flags to clear handle leaking NT */ 122/* Save restore flags to clear handle leaking NT */
90#define switch_to(prev, next, last) \ 123#define switch_to(prev, next, last) \
91 asm volatile(SAVE_CONTEXT \ 124 asm volatile(SAVE_CONTEXT \
92 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ 125 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
93 "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ 126 "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
94 "call __switch_to\n\t" \ 127 "call __switch_to\n\t" \
95 ".globl thread_return\n" \ 128 ".globl thread_return\n" \
96 "thread_return:\n\t" \ 129 "thread_return:\n\t" \
97 "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ 130 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
131 __switch_canary \
98 "movq %P[thread_info](%%rsi),%%r8\n\t" \ 132 "movq %P[thread_info](%%rsi),%%r8\n\t" \
99 LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
100 "movq %%rax,%%rdi\n\t" \ 133 "movq %%rax,%%rdi\n\t" \
101 "jc ret_from_fork\n\t" \ 134 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
135 "jnz ret_from_fork\n\t" \
102 RESTORE_CONTEXT \ 136 RESTORE_CONTEXT \
103 : "=a" (last) \ 137 : "=a" (last) \
138 __switch_canary_oparam \
104 : [next] "S" (next), [prev] "D" (prev), \ 139 : [next] "S" (next), [prev] "D" (prev), \
105 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ 140 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
106 [ti_flags] "i" (offsetof(struct thread_info, flags)), \ 141 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
107 [tif_fork] "i" (TIF_FORK), \ 142 [_tif_fork] "i" (_TIF_FORK), \
108 [thread_info] "i" (offsetof(struct task_struct, stack)), \ 143 [thread_info] "i" (offsetof(struct task_struct, stack)), \
109 [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \ 144 [current_task] "m" (per_cpu_var(current_task)) \
145 __switch_canary_iparam \
110 : "memory", "cc" __EXTRA_CLOBBER) 146 : "memory", "cc" __EXTRA_CLOBBER)
111#endif 147#endif
112 148
@@ -165,6 +201,25 @@ extern void native_load_gs_index(unsigned);
 #define savesegment(seg, value) \
 	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
 
+/*
+ * x86_32 user gs accessors.
+ */
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_32_LAZY_GS
+#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;})
+#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
+#define task_user_gs(tsk) ((tsk)->thread.gs)
+#define lazy_save_gs(v) savesegment(gs, (v))
+#define lazy_load_gs(v) loadsegment(gs, (v))
+#else /* X86_32_LAZY_GS */
+#define get_user_gs(regs) (u16)((regs)->gs)
+#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
+#define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
+#define lazy_save_gs(v) do { } while (0)
+#define lazy_load_gs(v) do { } while (0)
+#endif /* X86_32_LAZY_GS */
+#endif /* X86_32 */
+
 static inline unsigned long get_limit(unsigned long segment)
 {
 	unsigned long __limit;
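
Both switch_to() variants above copy the incoming task's stack_canary into the fixed slot that GCC-compiled code reads through %gs. A minimal userspace sketch of that hand-off, with a plain global standing in for the per-CPU slot and an invented two-field task struct:

#include <stdio.h>

struct task {
	const char *comm;
	unsigned long stack_canary;	/* per-task canary, as in task_struct */
};

/* Stand-in for the %gs-addressed per-CPU slot (stack_canary on 32-bit,
 * irq_stack_union.stack_canary at offset 40 on 64-bit). */
static unsigned long percpu_stack_canary;

static void switch_canary(struct task *next)
{
	/* The __switch_canary asm fragments above do exactly this move. */
	percpu_stack_canary = next->stack_canary;
}

int main(void)
{
	struct task a = { "a", 0x1111 }, b = { "b", 0x2222 };

	switch_canary(&a);
	printf("running %s, canary slot = %#lx\n", a.comm, percpu_stack_canary);
	switch_canary(&b);
	printf("running %s, canary slot = %#lx\n", b.comm, percpu_stack_canary);
	return 0;
}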
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 98789647baa9..df9d5f78385e 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -40,6 +40,7 @@ struct thread_info {
 	 */
 	__u8 supervisor_stack[0];
 #endif
+	int uaccess_err;
 };
 
 #define INIT_THREAD_INFO(tsk) \
@@ -194,25 +195,21 @@ static inline struct thread_info *current_thread_info(void)
 
 #else /* X86_32 */
 
-#include <asm/pda.h>
+#include <asm/percpu.h>
+#define KERNEL_STACK_OFFSET (5*8)
 
 /*
  * macros/functions for gaining access to the thread information structure
  * preempt_count needs to be 1 initially, until the scheduler is functional.
  */
 #ifndef __ASSEMBLY__
-static inline struct thread_info *current_thread_info(void)
-{
-	struct thread_info *ti;
-	ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
-	return ti;
-}
+DECLARE_PER_CPU(unsigned long, kernel_stack);
 
-/* do not use in interrupt context */
-static inline struct thread_info *stack_thread_info(void)
+static inline struct thread_info *current_thread_info(void)
 {
 	struct thread_info *ti;
-	asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
+	ti = (void *)(percpu_read(kernel_stack) +
+		      KERNEL_STACK_OFFSET - THREAD_SIZE);
 	return ti;
 }
 
@@ -220,8 +217,8 @@ static inline struct thread_info *stack_thread_info(void)
 
 /* how to get the thread information struct from ASM */
 #define GET_THREAD_INFO(reg) \
-	movq %gs:pda_kernelstack,reg ; \
-	subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
+	movq PER_CPU_VAR(kernel_stack),reg ; \
+	subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
 
 #endif
 
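
current_thread_info() on x86_64 now derives the thread_info address from the per-CPU kernel_stack pointer rather than by masking the stack pointer (the old PDA path) -- both land on the base of the aligned stack. A self-contained sketch of the two computations (THREAD_SIZE and the sample buffer are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE		16384	/* a 16KB kernel stack, for illustration */
#define KERNEL_STACK_OFFSET	(5*8)

int main(void)
{
	/* A THREAD_SIZE-aligned block standing in for a kernel stack. */
	uint8_t *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
	uintptr_t sp = (uintptr_t)stack + THREAD_SIZE - 200;	/* some depth */

	/* Old stack_thread_info() way: mask %rsp down to the aligned base. */
	uintptr_t ti_mask = sp & ~((uintptr_t)THREAD_SIZE - 1);

	/* New way: per-CPU kernel_stack points near the top of the stack;
	 * subtract back to the base, as current_thread_info() now does. */
	uintptr_t kernel_stack = (uintptr_t)stack + THREAD_SIZE - KERNEL_STACK_OFFSET;
	uintptr_t ti_percpu = kernel_stack + KERNEL_STACK_OFFSET - THREAD_SIZE;

	printf("mask: %p  percpu: %p\n", (void *)ti_mask, (void *)ti_percpu);
	free(stack);
	return 0;
}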
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 0e7bbb549116..d3539f998f88 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -113,7 +113,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	__flush_tlb();
 }
 
-static inline void native_flush_tlb_others(const cpumask_t *cpumask,
+static inline void native_flush_tlb_others(const struct cpumask *cpumask,
 					   struct mm_struct *mm,
 					   unsigned long va)
 {
@@ -142,31 +142,28 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	flush_tlb_mm(vma->vm_mm);
 }
 
-void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
-			     unsigned long va);
+void native_flush_tlb_others(const struct cpumask *cpumask,
+			     struct mm_struct *mm, unsigned long va);
 
 #define TLBSTATE_OK 1
 #define TLBSTATE_LAZY 2
 
-#ifdef CONFIG_X86_32
 struct tlb_state {
 	struct mm_struct *active_mm;
 	int state;
-	char __cacheline_padding[L1_CACHE_BYTES-8];
 };
 DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
 
-void reset_lazy_tlbstate(void);
-#else
 static inline void reset_lazy_tlbstate(void)
 {
+	percpu_write(cpu_tlbstate.state, 0);
+	percpu_write(cpu_tlbstate.active_mm, &init_mm);
 }
-#endif
 
 #endif /* SMP */
 
 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
+#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va)
 #endif
 
 static inline void flush_tlb_kernel_range(unsigned long start,
@@ -175,4 +172,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 	flush_tlb_all();
 }
 
+extern void zap_low_mappings(void);
+
 #endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 4e2f2e0aab27..77cfb2cfb386 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -74,6 +74,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
 	return &node_to_cpumask_map[node];
 }
 
+static inline void setup_node_to_cpumask_map(void) { }
+
 #else /* CONFIG_X86_64 */
 
 /* Mappings between node number and cpus on that node. */
@@ -83,7 +85,8 @@ extern cpumask_t *node_to_cpumask_map;
 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 
 /* Returns the number of the current Node. */
-#define numa_node_id() read_pda(nodenumber)
+DECLARE_PER_CPU(int, node_number);
+#define numa_node_id() percpu_read(node_number)
 
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 extern int cpu_to_node(int cpu);
@@ -102,10 +105,7 @@ static inline int cpu_to_node(int cpu)
 /* Same function but used if called before per_cpu areas are setup */
 static inline int early_cpu_to_node(int cpu)
 {
-	if (early_per_cpu_ptr(x86_cpu_to_node_map))
-		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
-	return per_cpu(x86_cpu_to_node_map, cpu);
+	return early_per_cpu(x86_cpu_to_node_map, cpu);
 }
 
 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */
@@ -122,6 +122,8 @@ static inline cpumask_t node_to_cpumask(int node)
 
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
+extern void setup_node_to_cpumask_map(void);
+
 /*
  * Replace default node_to_cpumask_ptr with optimized version
  * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
@@ -192,9 +194,20 @@ extern int __node_distance(int, int);
 
 #else /* !CONFIG_NUMA */
 
-#define numa_node_id() 0
-#define cpu_to_node(cpu) 0
-#define early_cpu_to_node(cpu) 0
+static inline int numa_node_id(void)
+{
+	return 0;
+}
+
+static inline int cpu_to_node(int cpu)
+{
+	return 0;
+}
+
+static inline int early_cpu_to_node(int cpu)
+{
+	return 0;
+}
 
 static inline const cpumask_t *cpumask_of_node(int node)
 {
@@ -209,6 +222,8 @@ static inline int node_to_first_cpu(int node)
 	return first_cpu(cpu_online_map);
 }
 
+static inline void setup_node_to_cpumask_map(void) { }
+
 /*
  * Replace default node_to_cpumask_ptr with optimized version
  * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index 780ba0ab94f9..90f06c25221d 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -13,6 +13,7 @@ extern unsigned char *trampoline_base;
 
 extern unsigned long init_rsp;
 extern unsigned long initial_code;
+extern unsigned long initial_gs;
 
 #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
 #define TRAMPOLINE_BASE 0x6000
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index cf3bb053da0b..0d5342515b86 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -41,7 +41,7 @@ dotraplinkage void do_int3(struct pt_regs *, long);
 dotraplinkage void do_overflow(struct pt_regs *, long);
 dotraplinkage void do_bounds(struct pt_regs *, long);
 dotraplinkage void do_invalid_op(struct pt_regs *, long);
-dotraplinkage void do_device_not_available(struct pt_regs);
+dotraplinkage void do_device_not_available(struct pt_regs *, long);
 dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
 dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
 dotraplinkage void do_segment_not_present(struct pt_regs *, long);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 4340055b7559..b685ece89d5c 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -121,7 +121,7 @@ extern int __get_user_bad(void);
 
 #define __get_user_x(size, ret, x, ptr) \
 	asm volatile("call __get_user_" #size \
-		     : "=a" (ret),"=d" (x) \
+		     : "=a" (ret), "=d" (x) \
 		     : "0" (ptr)) \
 
 /* Careful: we have to cast the result to the type of the pointer
@@ -181,12 +181,12 @@ extern int __get_user_bad(void);
 
 #define __put_user_x(size, x, ptr, __ret_pu) \
 	asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
-		     :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 
 
 
 #ifdef CONFIG_X86_32
-#define __put_user_u64(x, addr, err) \
+#define __put_user_asm_u64(x, addr, err, errret) \
 	asm volatile("1: movl %%eax,0(%2)\n" \
 		     "2: movl %%edx,4(%2)\n" \
 		     "3:\n" \
@@ -197,14 +197,24 @@ extern int __get_user_bad(void);
 		     _ASM_EXTABLE(1b, 4b) \
 		     _ASM_EXTABLE(2b, 4b) \
 		     : "=r" (err) \
-		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
+		     : "A" (x), "r" (addr), "i" (errret), "0" (err))
+
+#define __put_user_asm_ex_u64(x, addr) \
+	asm volatile("1: movl %%eax,0(%1)\n" \
+		     "2: movl %%edx,4(%1)\n" \
+		     "3:\n" \
+		     _ASM_EXTABLE(1b, 2b - 1b) \
+		     _ASM_EXTABLE(2b, 3b - 2b) \
+		     : : "A" (x), "r" (addr))
 
 #define __put_user_x8(x, ptr, __ret_pu) \
 	asm volatile("call __put_user_8" : "=a" (__ret_pu) \
 		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 #else
-#define __put_user_u64(x, ptr, retval) \
-	__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
+#define __put_user_asm_u64(x, ptr, retval, errret) \
+	__put_user_asm(x, ptr, retval, "q", "", "Zr", errret)
+#define __put_user_asm_ex_u64(x, addr) \
+	__put_user_asm_ex(x, addr, "q", "", "Zr")
 #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
 #endif
 
@@ -276,10 +286,32 @@ do { \
276 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \ 286 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
277 break; \ 287 break; \
278 case 4: \ 288 case 4: \
279 __put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\ 289 __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
280 break; \ 290 break; \
281 case 8: \ 291 case 8: \
282 __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \ 292 __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
293 errret); \
294 break; \
295 default: \
296 __put_user_bad(); \
297 } \
298} while (0)
299
300#define __put_user_size_ex(x, ptr, size) \
301do { \
302 __chk_user_ptr(ptr); \
303 switch (size) { \
304 case 1: \
305 __put_user_asm_ex(x, ptr, "b", "b", "iq"); \
306 break; \
307 case 2: \
308 __put_user_asm_ex(x, ptr, "w", "w", "ir"); \
309 break; \
310 case 4: \
311 __put_user_asm_ex(x, ptr, "l", "k", "ir"); \
312 break; \
313 case 8: \
314 __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
283 break; \ 315 break; \
284 default: \ 316 default: \
285 __put_user_bad(); \ 317 __put_user_bad(); \
@@ -311,9 +343,12 @@ do { \
 
 #ifdef CONFIG_X86_32
 #define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
+#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
 #else
 #define __get_user_asm_u64(x, ptr, retval, errret) \
 	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
+#define __get_user_asm_ex_u64(x, ptr) \
+	__get_user_asm_ex(x, ptr, "q", "", "=r")
 #endif
 
 #define __get_user_size(x, ptr, size, retval, errret) \
@@ -350,6 +385,33 @@ do { \
350 : "=r" (err), ltype(x) \ 385 : "=r" (err), ltype(x) \
351 : "m" (__m(addr)), "i" (errret), "0" (err)) 386 : "m" (__m(addr)), "i" (errret), "0" (err))
352 387
388#define __get_user_size_ex(x, ptr, size) \
389do { \
390 __chk_user_ptr(ptr); \
391 switch (size) { \
392 case 1: \
393 __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
394 break; \
395 case 2: \
396 __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
397 break; \
398 case 4: \
399 __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
400 break; \
401 case 8: \
402 __get_user_asm_ex_u64(x, ptr); \
403 break; \
404 default: \
405 (x) = __get_user_bad(); \
406 } \
407} while (0)
408
409#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
410 asm volatile("1: mov"itype" %1,%"rtype"0\n" \
411 "2:\n" \
412 _ASM_EXTABLE(1b, 2b - 1b) \
413 : ltype(x) : "m" (__m(addr)))
414
353#define __put_user_nocheck(x, ptr, size) \ 415#define __put_user_nocheck(x, ptr, size) \
354({ \ 416({ \
355 int __pu_err; \ 417 int __pu_err; \
@@ -385,6 +447,26 @@ struct __large_struct { unsigned long buf[100]; };
 		     _ASM_EXTABLE(1b, 3b) \
 		     : "=r"(err) \
 		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
+	asm volatile("1: mov"itype" %"rtype"0,%1\n" \
+		     "2:\n" \
+		     _ASM_EXTABLE(1b, 2b - 1b) \
+		     : : ltype(x), "m" (__m(addr)))
+
+/*
+ * uaccess_try and catch
+ */
+#define uaccess_try do { \
+	int prev_err = current_thread_info()->uaccess_err; \
+	current_thread_info()->uaccess_err = 0; \
+	barrier();
+
+#define uaccess_catch(err) \
+	(err) |= current_thread_info()->uaccess_err; \
+	current_thread_info()->uaccess_err = prev_err; \
+} while (0)
+
 /**
  * __get_user: - Get a simple variable from user space, with less checking.
  * @x: Variable to store result.
@@ -408,6 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
 
 #define __get_user(x, ptr) \
 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
 /**
  * __put_user: - Write a simple value into user space, with less checking.
  * @x: Value to copy to user space.
@@ -435,6 +518,45 @@ struct __large_struct { unsigned long buf[100]; };
 #define __put_user_unaligned __put_user
 
 /*
+ * {get|put}_user_try and catch
+ *
+ * get_user_try {
+ *	get_user_ex(...);
+ * } get_user_catch(err)
+ */
+#define get_user_try uaccess_try
+#define get_user_catch(err) uaccess_catch(err)
+
+#define get_user_ex(x, ptr) do { \
+	unsigned long __gue_val; \
+	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
+	(x) = (__force __typeof__(*(ptr)))__gue_val; \
+} while (0)
+
+#ifdef CONFIG_X86_WP_WORKS_OK
+
+#define put_user_try uaccess_try
+#define put_user_catch(err) uaccess_catch(err)
+
+#define put_user_ex(x, ptr) \
+	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#else /* !CONFIG_X86_WP_WORKS_OK */
+
+#define put_user_try do { \
+	int __uaccess_err = 0;
+
+#define put_user_catch(err) \
+	(err) |= __uaccess_err; \
+} while (0)
+
+#define put_user_ex(x, ptr) do { \
+	__uaccess_err |= __put_user(x, ptr); \
+} while (0)
+
+#endif /* CONFIG_X86_WP_WORKS_OK */
+
+/*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
 #ifdef CONFIG_X86_INTEL_USERCOPY
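
The try/catch accessors above let a run of user accesses record faults in thread_info->uaccess_err instead of branching after every access. A runnable userspace analogue of just the control-flow shape (the real macros rely on the kernel's exception tables, which have no userspace equivalent; every name below is invented for illustration):

#include <stdio.h>

static int uaccess_err;

/* Paired open/close macros, like uaccess_try / uaccess_catch: the "try"
 * opens a do { } block, the "catch" folds the sticky error into err. */
#define demo_try	do { uaccess_err = 0;
#define demo_catch(err)	(err) |= uaccess_err; } while (0)

/* Each access records failure instead of returning it. -14 plays the
 * role of -EFAULT. */
#define demo_get(x, p) \
	do { if ((p) == NULL) uaccess_err = -14; else (x) = *(p); } while (0)

int main(void)
{
	int a = 0, b = 0, err = 0;
	int src = 42;

	demo_try {
		demo_get(a, &src);
		demo_get(b, (int *)NULL);	/* simulated fault */
	} demo_catch(err);

	printf("a=%d b=%d err=%d\n", a, b, err);	/* a=42 b=0 err=-14 */
	return 0;
}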
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
new file mode 100644
index 000000000000..8242bf965812
--- /dev/null
+++ b/arch/x86/include/asm/uv/uv.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_X86_UV_UV_H
+#define _ASM_X86_UV_UV_H
+
+enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
+
+struct cpumask;
+struct mm_struct;
+
+#ifdef CONFIG_X86_UV
+
+extern enum uv_system_type get_uv_system_type(void);
+extern int is_uv_system(void);
+extern void uv_cpu_init(void);
+extern void uv_system_init(void);
+extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
+extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
+						 struct mm_struct *mm,
+						 unsigned long va,
+						 unsigned int cpu);
+
+#else /* X86_UV */
+
+static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
+static inline int is_uv_system(void) { return 0; }
+static inline void uv_cpu_init(void) { }
+static inline void uv_system_init(void) { }
+static inline int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
+{ return 1; }
+static inline const struct cpumask *
+uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
+		    unsigned long va, unsigned int cpu)
+{ return cpumask; }
+
+#endif /* X86_UV */
+
+#endif /* _ASM_X86_UV_UV_H */
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 50423c7b56b2..9b0e61bf7a88 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -325,7 +325,6 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
 #define cpubit_isset(cpu, bau_local_cpumask) \
 	test_bit((cpu), (bau_local_cpumask).bits)
 
-extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
 extern void uv_bau_message_intr1(void);
 extern void uv_bau_timeout_intr1(void);
 
diff --git a/arch/x86/include/asm/voyager.h b/arch/x86/include/asm/voyager.h
index b3e647307625..c1635d43616f 100644
--- a/arch/x86/include/asm/voyager.h
+++ b/arch/x86/include/asm/voyager.h
@@ -527,3 +527,45 @@ extern void voyager_smp_intr_init(void);
 #define VOYAGER_PSI_SUBREAD 2
 #define VOYAGER_PSI_SUBWRITE 3
 extern void voyager_cat_psi(__u8, __u16, __u8 *);
+
+/* These define the CPIs we use in linux */
+#define VIC_CPI_LEVEL0 0
+#define VIC_CPI_LEVEL1 1
+/* now the fake CPIs */
+#define VIC_TIMER_CPI 2
+#define VIC_INVALIDATE_CPI 3
+#define VIC_RESCHEDULE_CPI 4
+#define VIC_ENABLE_IRQ_CPI 5
+#define VIC_CALL_FUNCTION_CPI 6
+#define VIC_CALL_FUNCTION_SINGLE_CPI 7
+
+/* Now the QIC CPIs: Since we don't need the two initial levels,
+ * these are 2 less than the VIC CPIs */
+#define QIC_CPI_OFFSET 1
+#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
+#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
+#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
+#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
+
+#define VIC_START_FAKE_CPI VIC_TIMER_CPI
+#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
+
+/* this is the SYS_INT CPI. */
+#define VIC_SYS_INT 8
+#define VIC_CMN_INT 15
+
+/* This is the boot CPI for alternate processors. It gets overwritten
+ * by the above once the system has activated all available processors */
+#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
+#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
+
+extern asmlinkage void vic_cpi_interrupt(void);
+extern asmlinkage void vic_sys_interrupt(void);
+extern asmlinkage void vic_cmn_interrupt(void);
+extern asmlinkage void qic_timer_interrupt(void);
+extern asmlinkage void qic_invalidate_interrupt(void);
+extern asmlinkage void qic_reschedule_interrupt(void);
+extern asmlinkage void qic_enable_irq_interrupt(void);
+extern asmlinkage void qic_call_function_interrupt(void);
index 19144184983a..1df35417c412 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -15,10 +15,4 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
15 return raw_irqs_disabled_flags(regs->flags); 15 return raw_irqs_disabled_flags(regs->flags);
16} 16}
17 17
18static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
19{
20 regs->orig_ax = ~irq;
21 do_IRQ(regs);
22}
23
24#endif /* _ASM_X86_XEN_EVENTS_H */ 18#endif /* _ASM_X86_XEN_EVENTS_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index d364df03c1d6..24f357e7557a 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -23,13 +23,14 @@ nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_vsyscall_64.o	:= $(PROFILING) -g0 $(nostackp)
 CFLAGS_hpet.o		:= $(nostackp)
 CFLAGS_tsc.o		:= $(nostackp)
+CFLAGS_paravirt.o	:= $(nostackp)
 
 obj-y			:= process_$(BITS).o signal.o entry_$(BITS).o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y			+= time_$(BITS).o ioport.o ldt.o dumpstack.o
-obj-y			+= setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
+obj-y			+= setup.o i8259.o irqinit_$(BITS).o
 obj-$(CONFIG_X86_VISWS)	+= visws_quirks.o
-obj-$(CONFIG_X86_32)	+= probe_roms_32.o
+obj-$(CONFIG_X86_32)	+= probe_32.o probe_roms_32.o
 obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
 obj-$(CONFIG_X86_64)	+= syscall_64.o vsyscall_64.o
@@ -49,20 +50,20 @@ obj-y				+= step.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-y				+= cpu/
 obj-y				+= acpi/
-obj-$(CONFIG_X86_BIOS_REBOOT)	+= reboot.o
+obj-y				+= reboot.o
 obj-$(CONFIG_MCA)		+= mca_32.o
 obj-$(CONFIG_X86_MSR)		+= msr.o
 obj-$(CONFIG_X86_CPUID)		+= cpuid.o
 obj-$(CONFIG_PCI)		+= early-quirks.o
 apm-y				:= apm_32.o
 obj-$(CONFIG_APM)		+= apm.o
-obj-$(CONFIG_X86_SMP)		+= smp.o
-obj-$(CONFIG_X86_SMP)		+= smpboot.o tsc_sync.o ipi.o tlb_$(BITS).o
-obj-$(CONFIG_X86_32_SMP)	+= smpcommon.o
-obj-$(CONFIG_X86_64_SMP)	+= tsc_sync.o smpcommon.o
+obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_SMP)		+= smpboot.o tsc_sync.o ipi.o
+obj-$(CONFIG_SMP)		+= setup_percpu.o
+obj-$(CONFIG_X86_64_SMP)	+= tsc_sync.o
 obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline_$(BITS).o
 obj-$(CONFIG_X86_MPPARSE)	+= mpparse.o
-obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o nmi.o
+obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o nmi.o ipi.o
 obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o
 obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
@@ -70,9 +71,10 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)		+= relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_$(BITS).o
+obj-$(CONFIG_X86_BIGSMP)	+= bigsmp_32.o
 obj-$(CONFIG_X86_NUMAQ)		+= numaq_32.o
 obj-$(CONFIG_X86_ES7000)	+= es7000_32.o
-obj-$(CONFIG_X86_SUMMIT_NUMA)	+= summit_32.o
+obj-$(CONFIG_X86_SUMMIT)	+= summit_32.o
 obj-y				+= vsmp_64.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_MODULES)		+= module_$(BITS).o
@@ -114,10 +116,11 @@ obj-$(CONFIG_SWIOTLB)	+= pci-swiotlb_64.o # NB rename without _64
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
-        obj-y				+= genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
-        obj-y				+= bios_uv.o uv_irq.o uv_sysfs.o
+        obj-y				+= genapic_64.o genapic_flat_64.o
         obj-y				+= genx2apic_cluster.o
         obj-y				+= genx2apic_phys.o
+        obj-$(CONFIG_X86_UV)		+= genx2apic_uv_x.o tlb_uv.o
+        obj-$(CONFIG_X86_UV)		+= bios_uv.o uv_irq.o uv_sysfs.o
         obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer_64.o
         obj-$(CONFIG_AUDIT)		+= audit_64.o
 
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 7678f10c4568..956c1dee6fbe 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -42,10 +42,6 @@
 #include <asm/mpspec.h>
 #include <asm/smp.h>
 
-#ifdef CONFIG_X86_LOCAL_APIC
-# include <mach_apic.h>
-#endif
-
 static int __initdata acpi_force = 0;
 u32 acpi_rsdt_forced;
 #ifdef CONFIG_ACPI
@@ -56,16 +52,7 @@ int acpi_disabled = 1;
 EXPORT_SYMBOL(acpi_disabled);
 
 #ifdef CONFIG_X86_64
-
-#include <asm/proto.h>
-
-#else /* X86 */
-
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <mach_apic.h>
-#include <mach_mpparse.h>
-#endif /* CONFIG_X86_LOCAL_APIC */
-
+# include <asm/proto.h>
 #endif /* X86 */
 
 #define BAD_MADT_ENTRY(entry, end) ( \
@@ -121,35 +108,18 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
  */
 char *__init __acpi_map_table(unsigned long phys, unsigned long size)
 {
-	unsigned long base, offset, mapped_size;
-	int idx;
 
 	if (!phys || !size)
 		return NULL;
 
-	if (phys+size <= (max_low_pfn_mapped << PAGE_SHIFT))
-		return __va(phys);
-
-	offset = phys & (PAGE_SIZE - 1);
-	mapped_size = PAGE_SIZE - offset;
-	clear_fixmap(FIX_ACPI_END);
-	set_fixmap(FIX_ACPI_END, phys);
-	base = fix_to_virt(FIX_ACPI_END);
-
-	/*
-	 * Most cases can be covered by the below.
-	 */
-	idx = FIX_ACPI_END;
-	while (mapped_size < size) {
-		if (--idx < FIX_ACPI_BEGIN)
-			return NULL;	/* cannot handle this */
-		phys += PAGE_SIZE;
-		clear_fixmap(idx);
-		set_fixmap(idx, phys);
-		mapped_size += PAGE_SIZE;
-	}
+	return early_ioremap(phys, size);
+}
+void __init __acpi_unmap_table(char *map, unsigned long size)
+{
+	if (!map || !size)
+		return;
 
-	return ((unsigned char *)base + offset);
+	early_iounmap(map, size);
 }
 
 #ifdef CONFIG_PCI_MMCONFIG
@@ -239,7 +209,8 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
 			madt->address);
 	}
 
-	acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
+	default_acpi_madt_oem_check(madt->header.oem_id,
+				    madt->header.oem_table_id);
 
 	return 0;
 }
@@ -884,7 +855,7 @@ static struct {
 	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
 } mp_ioapic_routing[MAX_IO_APICS];
 
-static int mp_find_ioapic(int gsi)
+int mp_find_ioapic(int gsi)
 {
 	int i = 0;
 
@@ -899,6 +870,16 @@ static int mp_find_ioapic(int gsi)
 	return -1;
 }
 
+int mp_find_ioapic_pin(int ioapic, int gsi)
+{
+	if (WARN_ON(ioapic == -1))
+		return -1;
+	if (WARN_ON(gsi > mp_ioapic_routing[ioapic].gsi_end))
+		return -1;
+
+	return gsi - mp_ioapic_routing[ioapic].gsi_base;
+}
+
 static u8 __init uniq_ioapic_id(u8 id)
 {
 #ifdef CONFIG_X86_32
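[editor's note] The new mp_find_ioapic_pin() helper centralizes the "gsi - gsi_base" subtraction that several call sites below used to open-code, and adds WARN_ON() range checks. A standalone sketch of the lookup with made-up routing values (illustration only; the real table is filled in by mp_register_ioapic()):

	#include <stdio.h>

	struct routing { int gsi_base, gsi_end; };

	/* hypothetical layout: IOAPIC 0 serves GSI 0-23, IOAPIC 1 serves 24-47 */
	static struct routing mp_ioapic_routing[] = { { 0, 23 }, { 24, 47 } };

	static int find_pin(int ioapic, int gsi)
	{
		if (ioapic == -1 || gsi > mp_ioapic_routing[ioapic].gsi_end)
			return -1;
		return gsi - mp_ioapic_routing[ioapic].gsi_base;
	}

	int main(void)
	{
		printf("GSI 28 -> IOAPIC 1, pin %d\n", find_pin(1, 28)); /* pin 4 */
		return 0;
	}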
@@ -912,8 +893,8 @@ static u8 __init uniq_ioapic_id(u8 id)
 	DECLARE_BITMAP(used, 256);
 	bitmap_zero(used, 256);
 	for (i = 0; i < nr_ioapics; i++) {
-		struct mp_config_ioapic *ia = &mp_ioapics[i];
-		__set_bit(ia->mp_apicid, used);
+		struct mpc_ioapic *ia = &mp_ioapics[i];
+		__set_bit(ia->apicid, used);
 	}
 	if (!test_bit(id, used))
 		return id;
@@ -945,29 +926,29 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
 
 	idx = nr_ioapics;
 
-	mp_ioapics[idx].mp_type = MP_IOAPIC;
-	mp_ioapics[idx].mp_flags = MPC_APIC_USABLE;
-	mp_ioapics[idx].mp_apicaddr = address;
+	mp_ioapics[idx].type = MP_IOAPIC;
+	mp_ioapics[idx].flags = MPC_APIC_USABLE;
+	mp_ioapics[idx].apicaddr = address;
 
 	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-	mp_ioapics[idx].mp_apicid = uniq_ioapic_id(id);
+	mp_ioapics[idx].apicid = uniq_ioapic_id(id);
 #ifdef CONFIG_X86_32
-	mp_ioapics[idx].mp_apicver = io_apic_get_version(idx);
+	mp_ioapics[idx].apicver = io_apic_get_version(idx);
 #else
-	mp_ioapics[idx].mp_apicver = 0;
+	mp_ioapics[idx].apicver = 0;
 #endif
 	/*
 	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
 	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
 	 */
-	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mp_apicid;
+	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid;
 	mp_ioapic_routing[idx].gsi_base = gsi_base;
 	mp_ioapic_routing[idx].gsi_end = gsi_base +
 	    io_apic_get_redir_entries(idx);
 
-	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
-	       "GSI %d-%d\n", idx, mp_ioapics[idx].mp_apicid,
-	       mp_ioapics[idx].mp_apicver, mp_ioapics[idx].mp_apicaddr,
+	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
+	       "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
+	       mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
 	       mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);
 
 	nr_ioapics++;
@@ -996,19 +977,19 @@ int __init acpi_probe_gsi(void)
 	return max_gsi + 1;
 }
 
-static void assign_to_mp_irq(struct mp_config_intsrc *m,
-			     struct mp_config_intsrc *mp_irq)
+static void assign_to_mp_irq(struct mpc_intsrc *m,
+			     struct mpc_intsrc *mp_irq)
 {
-	memcpy(mp_irq, m, sizeof(struct mp_config_intsrc));
+	memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
 }
 
-static int mp_irq_cmp(struct mp_config_intsrc *mp_irq,
-		      struct mp_config_intsrc *m)
+static int mp_irq_cmp(struct mpc_intsrc *mp_irq,
+		      struct mpc_intsrc *m)
 {
-	return memcmp(mp_irq, m, sizeof(struct mp_config_intsrc));
+	return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
 }
 
-static void save_mp_irq(struct mp_config_intsrc *m)
+static void save_mp_irq(struct mpc_intsrc *m)
 {
 	int i;
 
@@ -1026,7 +1007,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 {
 	int ioapic;
 	int pin;
-	struct mp_config_intsrc mp_irq;
+	struct mpc_intsrc mp_irq;
 
 	/*
 	 * Convert 'gsi' to 'ioapic.pin'.
@@ -1034,7 +1015,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 	ioapic = mp_find_ioapic(gsi);
 	if (ioapic < 0)
 		return;
-	pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
+	pin = mp_find_ioapic_pin(ioapic, gsi);
 
 	/*
 	 * TBD: This check is for faulty timer entries, where the override
@@ -1044,13 +1025,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 	if ((bus_irq == 0) && (trigger == 3))
 		trigger = 1;
 
-	mp_irq.mp_type = MP_INTSRC;
-	mp_irq.mp_irqtype = mp_INT;
-	mp_irq.mp_irqflag = (trigger << 2) | polarity;
-	mp_irq.mp_srcbus = MP_ISA_BUS;
-	mp_irq.mp_srcbusirq = bus_irq;	/* IRQ */
-	mp_irq.mp_dstapic = mp_ioapics[ioapic].mp_apicid; /* APIC ID */
-	mp_irq.mp_dstirq = pin;	/* INTIN# */
+	mp_irq.type = MP_INTSRC;
+	mp_irq.irqtype = mp_INT;
+	mp_irq.irqflag = (trigger << 2) | polarity;
+	mp_irq.srcbus = MP_ISA_BUS;
+	mp_irq.srcbusirq = bus_irq;	/* IRQ */
+	mp_irq.dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */
+	mp_irq.dstirq = pin;	/* INTIN# */
 
 	save_mp_irq(&mp_irq);
 }
@@ -1060,7 +1041,7 @@ void __init mp_config_acpi_legacy_irqs(void)
 	int i;
 	int ioapic;
 	unsigned int dstapic;
-	struct mp_config_intsrc mp_irq;
+	struct mpc_intsrc mp_irq;
 
 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
 	/*
@@ -1085,7 +1066,7 @@ void __init mp_config_acpi_legacy_irqs(void)
 	ioapic = mp_find_ioapic(0);
 	if (ioapic < 0)
 		return;
-	dstapic = mp_ioapics[ioapic].mp_apicid;
+	dstapic = mp_ioapics[ioapic].apicid;
 
 	/*
 	 * Use the default configuration for the IRQs 0-15.  Unless
@@ -1095,16 +1076,14 @@ void __init mp_config_acpi_legacy_irqs(void)
 		int idx;
 
 		for (idx = 0; idx < mp_irq_entries; idx++) {
-			struct mp_config_intsrc *irq = mp_irqs + idx;
+			struct mpc_intsrc *irq = mp_irqs + idx;
 
 			/* Do we already have a mapping for this ISA IRQ? */
-			if (irq->mp_srcbus == MP_ISA_BUS
-			    && irq->mp_srcbusirq == i)
+			if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
 				break;
 
 			/* Do we already have a mapping for this IOAPIC pin */
-			if (irq->mp_dstapic == dstapic &&
-			    irq->mp_dstirq == i)
+			if (irq->dstapic == dstapic && irq->dstirq == i)
 				break;
 		}
 
@@ -1113,13 +1092,13 @@ void __init mp_config_acpi_legacy_irqs(void)
 			continue;	/* IRQ already used */
 		}
 
-		mp_irq.mp_type = MP_INTSRC;
-		mp_irq.mp_irqflag = 0;	/* Conforming */
-		mp_irq.mp_srcbus = MP_ISA_BUS;
-		mp_irq.mp_dstapic = dstapic;
-		mp_irq.mp_irqtype = mp_INT;
-		mp_irq.mp_srcbusirq = i; /* Identity mapped */
-		mp_irq.mp_dstirq = i;
+		mp_irq.type = MP_INTSRC;
+		mp_irq.irqflag = 0;	/* Conforming */
+		mp_irq.srcbus = MP_ISA_BUS;
+		mp_irq.dstapic = dstapic;
+		mp_irq.irqtype = mp_INT;
+		mp_irq.srcbusirq = i; /* Identity mapped */
+		mp_irq.dstirq = i;
 
 		save_mp_irq(&mp_irq);
 	}
@@ -1156,7 +1135,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
 		return gsi;
 	}
 
-	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
+	ioapic_pin = mp_find_ioapic_pin(ioapic, gsi);
 
 #ifdef CONFIG_X86_32
 	if (ioapic_renumber_irq)
@@ -1230,22 +1209,22 @@ int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
 			u32 gsi, int triggering, int polarity)
 {
 #ifdef CONFIG_X86_MPPARSE
-	struct mp_config_intsrc mp_irq;
+	struct mpc_intsrc mp_irq;
 	int ioapic;
 
 	if (!acpi_ioapic)
 		return 0;
 
 	/* print the entry should happen on mptable identically */
-	mp_irq.mp_type = MP_INTSRC;
-	mp_irq.mp_irqtype = mp_INT;
-	mp_irq.mp_irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
+	mp_irq.type = MP_INTSRC;
+	mp_irq.irqtype = mp_INT;
+	mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
 				(polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
-	mp_irq.mp_srcbus = number;
-	mp_irq.mp_srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
+	mp_irq.srcbus = number;
+	mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
 	ioapic = mp_find_ioapic(gsi);
-	mp_irq.mp_dstapic = mp_ioapic_routing[ioapic].apic_id;
-	mp_irq.mp_dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
+	mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id;
+	mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
 
 	save_mp_irq(&mp_irq);
 #endif
@@ -1372,7 +1351,7 @@ static void __init acpi_process_madt(void)
 	if (!error) {
 		acpi_lapic = 1;
 
-#ifdef CONFIG_X86_GENERICARCH
+#ifdef CONFIG_X86_BIGSMP
 		generic_bigsmp_probe();
 #endif
 		/*
@@ -1384,9 +1363,8 @@ static void __init acpi_process_madt(void)
 			acpi_ioapic = 1;
 
 			smp_found_config = 1;
-#ifdef CONFIG_X86_32
-			setup_apic_routing();
-#endif
+			if (apic->setup_apic_routing)
+				apic->setup_apic_routing();
 		}
 	}
 	if (error == -EINVAL) {
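[editor's note] The __acpi_map_table() rewrite earlier in this file trades the hand-rolled FIX_ACPI_* fixmap juggling for early_ioremap(). One behavioral consequence: mappings are no longer "free", so every map must now be paired with an unmap, which is what the new __acpi_unmap_table() provides. A sketch of the caller pattern this implies (error handling trimmed):

	char *map;

	map = __acpi_map_table(phys, size);	/* early_ioremap() inside */
	if (map) {
		/* ... parse the ACPI table through 'map' ... */
		__acpi_unmap_table(map, size);	/* early_iounmap() inside */
	}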
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index a60c1f3bcb87..7c243a2c5115 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -101,6 +101,7 @@ int acpi_save_state_mem(void)
 	stack_start.sp = temp_stack + sizeof(temp_stack);
 	early_gdt_descr.address =
 			(unsigned long)get_cpu_gdt_table(smp_processor_id());
+	initial_gs = per_cpu_offset(smp_processor_id());
 #endif
 	initial_code = (unsigned long)wakeup_long64;
 	saved_magic = 0x123456789abcdef0;
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 115449f869ee..cf2ca19e62da 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -1,7 +1,7 @@
 /*
  *	Local APIC handling, local APIC timers
  *
- *	(c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ *	(c) 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  *
  *	Fixes
  *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
@@ -14,51 +14,71 @@
  *	Mikael Pettersson	:	PM converted to driver model.
  */
 
-#include <linux/init.h>
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/interrupt.h>
-#include <linux/mc146818rtc.h>
 #include <linux/kernel_stat.h>
-#include <linux/sysdev.h>
-#include <linux/ioport.h>
-#include <linux/cpu.h>
-#include <linux/clockchips.h>
+#include <linux/mc146818rtc.h>
 #include <linux/acpi_pmtmr.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/bootmem.h>
+#include <linux/ftrace.h>
+#include <linux/ioport.h>
 #include <linux/module.h>
-#include <linux/dmi.h>
+#include <linux/sysdev.h>
+#include <linux/delay.h>
+#include <linux/timex.h>
 #include <linux/dmar.h>
-#include <linux/ftrace.h>
-#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/dmi.h>
 #include <linux/nmi.h>
-#include <linux/timex.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
 
-#include <asm/atomic.h>
-#include <asm/mtrr.h>
-#include <asm/mpspec.h>
-#include <asm/desc.h>
 #include <asm/arch_hooks.h>
-#include <asm/hpet.h>
 #include <asm/pgalloc.h>
+#include <asm/genapic.h>
+#include <asm/atomic.h>
+#include <asm/mpspec.h>
 #include <asm/i8253.h>
-#include <asm/idle.h>
+#include <asm/i8259.h>
 #include <asm/proto.h>
 #include <asm/apic.h>
-#include <asm/i8259.h>
+#include <asm/desc.h>
+#include <asm/hpet.h>
+#include <asm/idle.h>
+#include <asm/mtrr.h>
 #include <asm/smp.h>
 
-#include <mach_apic.h>
-#include <mach_apicdef.h>
-#include <mach_ipi.h>
+unsigned int num_processors;
+
+unsigned disabled_cpus __cpuinitdata;
+
+/* Processor that is doing the boot up */
+unsigned int boot_cpu_physical_apicid = -1U;
 
 /*
- * Sanity check
+ * The highest APIC ID seen during enumeration.
+ *
+ * This determines the messaging protocol we can use: if all APIC IDs
+ * are in the 0 ... 7 range, then we can use logical addressing which
+ * has some performance advantages (better broadcasting).
+ *
+ * If there's an APIC ID above 8, we use physical addressing.
  */
-#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
-# error SPURIOUS_APIC_VECTOR definition error
-#endif
+unsigned int max_physical_apicid;
+
+/*
+ * Bitmask of physically existing CPUs:
+ */
+physid_mask_t phys_cpu_present_map;
+
+/*
+ * Map cpu index to physical APIC ID
+ */
+DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
 #ifdef CONFIG_X86_32
 /*
@@ -457,7 +477,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 static void lapic_timer_broadcast(const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
-	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+	apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
 #endif
 }
 
@@ -535,7 +555,8 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
 	}
 }
 
-static int __init calibrate_by_pmtimer(long deltapm, long *delta)
+static int __init
+calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
 {
 	const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
 	const long pm_thresh = pm_100ms / 100;
@@ -546,7 +567,7 @@ static int __init calibrate_by_pmtimer(long deltapm, long *delta)
 		return -1;
 #endif
 
-	apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
+	apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);
 
 	/* Check, if the PM timer is available */
 	if (!deltapm)
@@ -556,19 +577,30 @@ static int __init calibrate_by_pmtimer(long deltapm, long *delta)
 
 	if (deltapm > (pm_100ms - pm_thresh) &&
 	    deltapm < (pm_100ms + pm_thresh)) {
-		apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
-	} else {
-		res = (((u64)deltapm) * mult) >> 22;
-		do_div(res, 1000000);
-		pr_warning("APIC calibration not consistent "
-			   "with PM Timer: %ldms instead of 100ms\n",
-			   (long)res);
-		/* Correct the lapic counter value */
-		res = (((u64)(*delta)) * pm_100ms);
+		apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
+		return 0;
+	}
+
+	res = (((u64)deltapm) * mult) >> 22;
+	do_div(res, 1000000);
+	pr_warning("APIC calibration not consistent "
+		   "with PM-Timer: %ldms instead of 100ms\n",(long)res);
+
+	/* Correct the lapic counter value */
+	res = (((u64)(*delta)) * pm_100ms);
+	do_div(res, deltapm);
+	pr_info("APIC delta adjusted to PM-Timer: "
+		"%lu (%ld)\n", (unsigned long)res, *delta);
+	*delta = (long)res;
+
+	/* Correct the tsc counter value */
+	if (cpu_has_tsc) {
+		res = (((u64)(*deltatsc)) * pm_100ms);
 		do_div(res, deltapm);
-		pr_info("APIC delta adjusted to PM-Timer: "
-			"%lu (%ld)\n", (unsigned long)res, *delta);
-		*delta = (long)res;
+		apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
+			"PM-Timer: %lu (%ld) \n",
+			(unsigned long)res, *deltatsc);
+		*deltatsc = (long)res;
 	}
 
 	return 0;
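[editor's note] The restructured calibrate_by_pmtimer() applies the same correction to the LAPIC delta and (new here) the TSC delta: scale the measured count so it corresponds to exactly 100 ms of PM-timer time, i.e. adjusted = delta * pm_100ms / deltapm. A standalone arithmetic check with invented numbers:

	#include <stdio.h>
	#include <stdint.h>

	#define PMTMR_TICKS_PER_SEC	3579545	/* ACPI PM timer: 3.579545 MHz */

	int main(void)
	{
		const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;	/* 357954 ticks */
		long delta   = 1000000;	/* hypothetical LAPIC count */
		long deltapm = 340000;	/* PM timer saw less than 100 ms elapse */
		uint64_t res = (uint64_t)delta * pm_100ms / deltapm;

		/* prints 1052805: the count rescaled to a true 100 ms window */
		printf("APIC delta adjusted to PM-Timer: %llu (%ld)\n",
		       (unsigned long long)res, delta);
		return 0;
	}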
@@ -579,7 +611,7 @@ static int __init calibrate_APIC_clock(void)
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 	void (*real_handler)(struct clock_event_device *dev);
 	unsigned long deltaj;
-	long delta;
+	long delta, deltatsc;
 	int pm_referenced = 0;
 
 	local_irq_disable();
@@ -609,9 +641,11 @@ static int __init calibrate_APIC_clock(void)
 	delta = lapic_cal_t1 - lapic_cal_t2;
 	apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
 
+	deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
+
 	/* we trust the PM based calibration if possible */
 	pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
-					      &delta);
+					      &delta, &deltatsc);
 
 	/* Calculate the scaled math multiplication factor */
 	lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
@@ -629,11 +663,10 @@ static int __init calibrate_APIC_clock(void)
 		    calibration_result);
 
 	if (cpu_has_tsc) {
-		delta = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
 		apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
 			    "%ld.%04ld MHz.\n",
-			    (delta / LAPIC_CAL_LOOPS) / (1000000 / HZ),
-			    (delta / LAPIC_CAL_LOOPS) % (1000000 / HZ));
+			    (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
+			    (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
 	}
 
 	apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
@@ -991,11 +1024,11 @@ int __init verify_local_APIC(void)
 	 */
 	reg0 = apic_read(APIC_ID);
 	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
-	apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
+	apic_write(APIC_ID, reg0 ^ apic->apic_id_mask);
 	reg1 = apic_read(APIC_ID);
 	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
 	apic_write(APIC_ID, reg0);
-	if (reg1 != (reg0 ^ APIC_ID_MASK))
+	if (reg1 != (reg0 ^ apic->apic_id_mask))
 		return 0;
 
 	/*
@@ -1089,7 +1122,7 @@ static void __cpuinit lapic_setup_esr(void)
 		return;
 	}
 
-	if (esr_disable) {
+	if (apic->disable_esr) {
 		/*
 		 * Something untraceable is creating bad interrupts on
 		 * secondary quads ... for the moment, just leave the
@@ -1130,9 +1163,14 @@ void __cpuinit setup_local_APIC(void)
 	unsigned int value;
 	int i, j;
 
+	if (disable_apic) {
+		arch_disable_smp_support();
+		return;
+	}
+
 #ifdef CONFIG_X86_32
 	/* Pound the ESR really hard over the head with a big hammer - mbligh */
-	if (lapic_is_integrated() && esr_disable) {
+	if (lapic_is_integrated() && apic->disable_esr) {
 		apic_write(APIC_ESR, 0);
 		apic_write(APIC_ESR, 0);
 		apic_write(APIC_ESR, 0);
@@ -1146,7 +1184,7 @@ void __cpuinit setup_local_APIC(void)
 	 * Double-check whether this APIC is really registered.
 	 * This is meaningless in clustered apic mode, so we skip it.
 	 */
-	if (!apic_id_registered())
+	if (!apic->apic_id_registered())
 		BUG();
 
 	/*
@@ -1154,7 +1192,7 @@ void __cpuinit setup_local_APIC(void)
 	 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 	 * document number 292116).  So here it goes...
 	 */
-	init_apic_ldr();
+	apic->init_apic_ldr();
 
 	/*
 	 * Set Task Priority to 'accept all'. We never change this
@@ -1570,11 +1608,11 @@ int apic_version[MAX_APICS];
 
 int __init APIC_init_uniprocessor(void)
 {
-#ifdef CONFIG_X86_64
 	if (disable_apic) {
 		pr_info("Apic disabled\n");
 		return -1;
 	}
+#ifdef CONFIG_X86_64
 	if (!cpu_has_apic) {
 		disable_apic = 1;
 		pr_info("Apic disabled by BIOS\n");
@@ -1600,7 +1638,7 @@ int __init APIC_init_uniprocessor(void)
 	enable_IR_x2apic();
 #endif
 #ifdef CONFIG_X86_64
-	setup_apic_routing();
+	default_setup_apic_routing();
 #endif
 
 	verify_local_APIC();
@@ -1738,7 +1776,8 @@ void __init connect_bsp_APIC(void)
 		outb(0x01, 0x23);
 	}
 #endif
-	enable_apic_mode();
+	if (apic->enable_apic_mode)
+		apic->enable_apic_mode();
 }
 
 /**
@@ -1876,29 +1915,39 @@ void __cpuinit generic_processor_info(int apicid, int version)
 	}
 #endif
 
-#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64)
-	/* are we being called early in kernel startup? */
-	if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
-		u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
-		u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
-
-		cpu_to_apicid[cpu] = apicid;
-		bios_cpu_apicid[cpu] = apicid;
-	} else {
-		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
-		per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
-	}
+#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
+	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
+	early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
 #endif
 
 	set_cpu_possible(cpu, true);
 	set_cpu_present(cpu, true);
 }
 
-#ifdef CONFIG_X86_64
 int hard_smp_processor_id(void)
 {
 	return read_apic_id();
 }
+
+void default_init_apic_ldr(void)
+{
+	unsigned long val;
+
+	apic_write(APIC_DFR, APIC_DFR_VALUE);
+	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+	val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
+	apic_write(APIC_LDR, val);
+}
+
+#ifdef CONFIG_X86_32
+int default_apicid_to_node(int logical_apicid)
+{
+#ifdef CONFIG_SMP
+	return apicid_2_node[hard_smp_processor_id()];
+#else
+	return 0;
+#endif
+}
 #endif
 
 /*
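[editor's note] default_init_apic_ldr() above encodes the flat logical-addressing scheme described in the new comment at the top of this file: each CPU claims exactly one bit of the 8-bit logical APIC ID in LDR bits 31:24, which is why logical mode tops out at 8 CPUs and anything beyond that forces physical addressing. A standalone sketch of the bit layout (SET_APIC_LOGICAL_ID simplified from its apicdef.h form):

	#include <stdio.h>

	#define SET_APIC_LOGICAL_ID(x)	(((x) << 24) & 0xFF000000UL)

	int main(void)
	{
		int cpu;

		/* one bit per CPU in LDR bits 31:24 -> at most 8 CPUs */
		for (cpu = 0; cpu < 8; cpu++)
			printf("cpu %d -> LDR 0x%08lx\n", cpu,
			       SET_APIC_LOGICAL_ID(1UL << cpu));
		return 0;
	}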
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 98807bb095ad..37ba5f85b718 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -301,7 +301,7 @@ extern int (*console_blank_hook)(int);
  */
 #define APM_ZERO_SEGS
 
-#include "apm.h"
+#include <asm/apm.h>
 
 /*
  * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend.
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index ee4df08feee6..fbf2f33e3080 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -75,6 +75,7 @@ void foo(void)
 	OFFSET(PT_DS,  pt_regs, ds);
 	OFFSET(PT_ES,  pt_regs, es);
 	OFFSET(PT_FS,  pt_regs, fs);
+	OFFSET(PT_GS,  pt_regs, gs);
 	OFFSET(PT_ORIG_EAX, pt_regs, orig_ax);
 	OFFSET(PT_EIP, pt_regs, ip);
 	OFFSET(PT_CS,  pt_regs, cs);
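[editor's note] The new PT_GS offset feeds the 32-bit entry code, which now saves %gs in pt_regs (see the stackprotector changes elsewhere in this series). What OFFSET() produces is just an offsetof() constant usable from assembly; in spirit (illustrative struct, not the real pt_regs layout):

	#include <stdio.h>
	#include <stddef.h>

	struct regs { long bx, cx, dx, si, di, bp, ax, ds, es, fs, gs; };

	int main(void)
	{
		/* OFFSET(PT_GS, pt_regs, gs) emits "#define PT_GS <this value>" */
		printf("PT_GS = %zu\n", offsetof(struct regs, gs));
		return 0;
	}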
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 1d41d3f1edbc..8793ab33e2c1 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -11,7 +11,6 @@
 #include <linux/hardirq.h>
 #include <linux/suspend.h>
 #include <linux/kbuild.h>
-#include <asm/pda.h>
 #include <asm/processor.h>
 #include <asm/segment.h>
 #include <asm/thread_info.h>
@@ -48,16 +47,6 @@ int main(void)
 #endif
 	BLANK();
 #undef ENTRY
-#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
-	ENTRY(kernelstack);
-	ENTRY(oldrsp);
-	ENTRY(pcurrent);
-	ENTRY(irqcount);
-	ENTRY(cpunumber);
-	ENTRY(irqstackptr);
-	ENTRY(data_offset);
-	BLANK();
-#undef ENTRY
 #ifdef CONFIG_PARAVIRT
 	BLANK();
 	OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c
new file mode 100644
index 000000000000..47a62f46afdb
--- /dev/null
+++ b/arch/x86/kernel/bigsmp_32.c
@@ -0,0 +1,266 @@
+/*
+ * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs.
+ * Drives the local APIC in "clustered mode".
+ */
+#define APIC_DEFINITION 1
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <asm/mpspec.h>
+#include <asm/genapic.h>
+#include <asm/fixmap.h>
+#include <asm/apicdef.h>
+#include <asm/ipi.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/smp.h>
+
+
+static inline unsigned bigsmp_get_apic_id(unsigned long x)
+{
+	return (x >> 24) & 0xFF;
+}
+
+#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
+
+static inline int bigsmp_apic_id_registered(void)
+{
+	return 1;
+}
+
+static inline const cpumask_t *bigsmp_target_cpus(void)
+{
+#ifdef CONFIG_SMP
+	return &cpu_online_map;
+#else
+	return &cpumask_of_cpu(0);
+#endif
+}
+
+#define APIC_DFR_VALUE (APIC_DFR_FLAT)
+
+static inline unsigned long
+bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+	return 0;
+}
+
+static inline unsigned long bigsmp_check_apicid_present(int bit)
+{
+	return 1;
+}
+
+static inline unsigned long calculate_ldr(int cpu)
+{
+	unsigned long val, id;
+	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+	id = xapic_phys_to_log_apicid(cpu);
+	val |= SET_APIC_LOGICAL_ID(id);
+	return val;
+}
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116).  So here it goes...
+ */
+static inline void bigsmp_init_apic_ldr(void)
+{
+	unsigned long val;
+	int cpu = smp_processor_id();
+
+	apic_write(APIC_DFR, APIC_DFR_VALUE);
+	val = calculate_ldr(cpu);
+	apic_write(APIC_LDR, val);
+}
+
+static inline void bigsmp_setup_apic_routing(void)
+{
+	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
+		"Physflat", nr_ioapics);
+}
+
+static inline int bigsmp_apicid_to_node(int logical_apicid)
+{
+	return apicid_2_node[hard_smp_processor_id()];
+}
+
+static inline int bigsmp_cpu_present_to_apicid(int mps_cpu)
+{
+	if (mps_cpu < nr_cpu_ids)
+		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
+
+	return BAD_APICID;
+}
+
+static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
+{
+	return physid_mask_of_physid(phys_apicid);
+}
+
+extern u8 cpu_2_logical_apicid[];
+/* Mapping from cpu number to logical apicid */
+static inline int bigsmp_cpu_to_logical_apicid(int cpu)
+{
+	if (cpu >= nr_cpu_ids)
+		return BAD_APICID;
+	return cpu_physical_id(cpu);
+}
+
+static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+	/* For clustered we don't have a good way to do this yet - hack */
+	return physids_promote(0xFFL);
+}
+
+static inline void bigsmp_setup_portio_remap(void)
+{
+}
+
+static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+	return 1;
+}
+
+/* As we are using single CPU as destination, pick only one CPU here */
+static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+	return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
+}
+
+static inline unsigned int
+bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+			      const struct cpumask *andmask)
+{
+	int cpu;
+
+	/*
+	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
+	 * May as well be the first.
+	 */
+	for_each_cpu_and(cpu, cpumask, andmask) {
+		if (cpumask_test_cpu(cpu, cpu_online_mask))
+			break;
+	}
+	if (cpu < nr_cpu_ids)
+		return bigsmp_cpu_to_logical_apicid(cpu);
+
+	return BAD_APICID;
+}
+
+static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+	return cpuid_apic >> index_msb;
+}
+
+static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+	default_send_IPI_mask_sequence_phys(mask, vector);
+}
+
+static inline void bigsmp_send_IPI_allbutself(int vector)
+{
+	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
+}
+
+static inline void bigsmp_send_IPI_all(int vector)
+{
+	bigsmp_send_IPI_mask(cpu_online_mask, vector);
+}
+
+static int dmi_bigsmp; /* can be set by dmi scanners */
+
+static int hp_ht_bigsmp(const struct dmi_system_id *d)
+{
+	printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
+	dmi_bigsmp = 1;
+	return 0;
+}
+
+
+static const struct dmi_system_id bigsmp_dmi_table[] = {
+	{ hp_ht_bigsmp, "HP ProLiant DL760 G2",
+		{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
+		DMI_MATCH(DMI_BIOS_VERSION, "P44-"),}
+	},
+
+	{ hp_ht_bigsmp, "HP ProLiant DL740",
+		{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
+		DMI_MATCH(DMI_BIOS_VERSION, "P47-"),}
+	},
+	{ }
+};
+
+static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
+{
+	cpus_clear(*retmask);
+	cpu_set(cpu, *retmask);
+}
+
+static int probe_bigsmp(void)
+{
+	if (def_to_bigsmp)
+		dmi_bigsmp = 1;
+	else
+		dmi_check_system(bigsmp_dmi_table);
+	return dmi_bigsmp;
+}
+
+struct genapic apic_bigsmp = {
+
+	.name				= "bigsmp",
+	.probe				= probe_bigsmp,
+	.acpi_madt_oem_check		= NULL,
+	.apic_id_registered		= bigsmp_apic_id_registered,
+
+	.irq_delivery_mode		= dest_Fixed,
+	/* phys delivery to target CPU: */
+	.irq_dest_mode			= 0,
+
+	.target_cpus			= bigsmp_target_cpus,
+	.disable_esr			= 1,
+	.dest_logical			= 0,
+	.check_apicid_used		= bigsmp_check_apicid_used,
+	.check_apicid_present		= bigsmp_check_apicid_present,
+
+	.vector_allocation_domain	= bigsmp_vector_allocation_domain,
+	.init_apic_ldr			= bigsmp_init_apic_ldr,
+
+	.ioapic_phys_id_map		= bigsmp_ioapic_phys_id_map,
+	.setup_apic_routing		= bigsmp_setup_apic_routing,
+	.multi_timer_check		= NULL,
+	.apicid_to_node			= bigsmp_apicid_to_node,
+	.cpu_to_logical_apicid		= bigsmp_cpu_to_logical_apicid,
+	.cpu_present_to_apicid		= bigsmp_cpu_present_to_apicid,
+	.apicid_to_cpu_present		= bigsmp_apicid_to_cpu_present,
+	.setup_portio_remap		= NULL,
+	.check_phys_apicid_present	= bigsmp_check_phys_apicid_present,
+	.enable_apic_mode		= NULL,
+	.phys_pkg_id			= bigsmp_phys_pkg_id,
+	.mps_oem_check			= NULL,
+
+	.get_apic_id			= bigsmp_get_apic_id,
+	.set_apic_id			= NULL,
+	.apic_id_mask			= 0xFF << 24,
+
+	.cpu_mask_to_apicid		= bigsmp_cpu_mask_to_apicid,
+	.cpu_mask_to_apicid_and		= bigsmp_cpu_mask_to_apicid_and,
+
+	.send_IPI_mask			= bigsmp_send_IPI_mask,
+	.send_IPI_mask_allbutself	= NULL,
+	.send_IPI_allbutself		= bigsmp_send_IPI_allbutself,
+	.send_IPI_all			= bigsmp_send_IPI_all,
+	.send_IPI_self			= default_send_IPI_self,
+
+	.wakeup_cpu			= NULL,
+	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
+	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
+
+	.wait_for_init_deassert		= default_wait_for_init_deassert,
+
+	.smp_callin_clear_local_apic	= NULL,
+	.store_NMI_vector		= NULL,
+	.inquire_remote_apic		= default_inquire_remote_apic,
+};
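[editor's note] apic_bigsmp packages what used to live in the asm/bigsmp/*.h inline headers into one self-describing driver object. Selection is via the ->probe() callback: the probe_32.o added to the Makefile above is expected to walk a table of such drivers and install the first one that claims the machine. A hedged sketch of that selection loop (the table name and exact code are assumptions; the real logic lives in probe_32.c, outside this excerpt):

	extern struct genapic *apic;		/* the active driver */
	extern struct genapic *apic_probe[];	/* NULL-terminated driver table */

	static void pick_apic_driver(void)
	{
		int i;

		for (i = 0; apic_probe[i]; i++) {
			if (apic_probe[i]->probe()) {
				apic = apic_probe[i];	/* e.g. &apic_bigsmp */
				printk(KERN_INFO "Using APIC driver %s\n",
				       apic->name);
				break;
			}
		}
	}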
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 2cf23634b6d9..e48640cfac0c 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -7,7 +7,7 @@
 #include <asm/pat.h>
 #include <asm/processor.h>
 
-#include <mach_apic.h>
+#include <asm/genapic.h>
 
 struct cpuid_bit {
 	u16 feature;
@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
 	unsigned int eax, ebx, ecx, edx, sub_index;
 	unsigned int ht_mask_width, core_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
@@ -116,22 +116,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 
 	core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
 
-#ifdef CONFIG_X86_32
-	c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
+	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
 						 & core_select_mask;
-	c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
+	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
 	/*
 	 * Reinit the apicid, now that we have extended initial_apicid.
 	 */
-	c->apicid = phys_pkg_id(c->initial_apicid, 0);
-#else
-	c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
-	c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
-	/*
-	 * Reinit the apicid, now that we have extended initial_apicid.
-	 */
-	c->apicid = phys_pkg_id(0);
-#endif
+	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+
 	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
 
 
@@ -143,37 +135,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 	return;
 #endif
 }
-
-#ifdef CONFIG_X86_PAT
-void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
-{
-	if (!cpu_has_pat)
-		pat_disable("PAT not supported by CPU.");
-
-	switch (c->x86_vendor) {
-	case X86_VENDOR_INTEL:
-		/*
-		 * There is a known erratum on Pentium III and Core Solo
-		 * and Core Duo CPUs.
-		 * " Page with PAT set to WC while associated MTRR is UC
-		 * may consolidate to UC "
-		 * Because of this erratum, it is better to stick with
-		 * setting WC in MTRR rather than using PAT on these CPUs.
-		 *
-		 * Enable PAT WC only on P4, Core 2 or later CPUs.
-		 */
-		if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
-			return;
-
-		pat_disable("PAT WC disabled due to known CPU erratum.");
-		return;
-
-	case X86_VENDOR_AMD:
-	case X86_VENDOR_CENTAUR:
-	case X86_VENDOR_TRANSMETA:
-		return;
-	}
-
-	pat_disable("PAT disabled. Not yet verified on this CPU type.");
-}
-#endif
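[editor's note] For reference, the mask arithmetic in detect_extended_topology() above: core_select_mask keeps the bits of the APIC ID that select the core within a package, after the SMT bits are shifted out. A standalone check mirroring the kernel expressions with example widths (the exact widths come from CPUID leaf 0xb on real hardware):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ht_mask_width = 1;		/* 2 threads per core */
		unsigned int core_plus_mask_width = 4;	/* 16 logical CPUs/package */
		unsigned int core_select_mask =
			(~(-1 << core_plus_mask_width)) >> ht_mask_width;
		unsigned int apicid = 0x0b;	/* package 0, core 5, thread 1 */

		printf("core_select_mask = 0x%x\n", core_select_mask);	/* 0x7 */
		printf("cpu_core_id      = %u\n",
		       (apicid >> ht_mask_width) & core_select_mask);	/* 5 */
		printf("phys_proc_id     = %u\n",
		       apicid >> core_plus_mask_width);			/* 0 */
		return 0;
	}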
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7c878f6aa919..ff4d7b9e32e4 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -12,7 +12,7 @@
 # include <asm/cacheflush.h>
 #endif
 
-#include <mach_apic.h>
+#include <asm/genapic.h>
 
 #include "cpu.h"
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 83492b1f93b1..e8f4a386bd9d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -21,14 +21,16 @@
 #include <asm/asm.h>
 #include <asm/numa.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
+#include <asm/cpumask.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
-#include <mach_apic.h>
 #include <asm/genapic.h>
+#include <asm/genapic.h>
+#include <asm/uv/uv.h>
 #endif
 
-#include <asm/pda.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/desc.h>
@@ -37,6 +39,7 @@
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/hypervisor.h>
+#include <asm/stackprotector.h>
 
 #include "cpu.h"
 
@@ -50,6 +53,15 @@ cpumask_var_t cpu_initialized_mask;
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
 
+/* correctly size the local cpu masks */
+void __init setup_cpu_local_masks(void)
+{
+	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+	alloc_bootmem_cpumask_var(&cpu_callin_mask);
+	alloc_bootmem_cpumask_var(&cpu_callout_mask);
+	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
+
 #else /* CONFIG_X86_32 */
 
 cpumask_t cpu_callin_map;
@@ -62,23 +74,23 @@ cpumask_t cpu_sibling_setup_map;
 
 static struct cpu_dev *this_cpu __cpuinitdata;
 
+DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
-/* We need valid kernel segments for data and code in long mode too
- * IRET will check the segment types  kkeil 2000/10/28
- * Also sysret mandates a special GDT layout
- */
-/* The TLS descriptors are currently at a different place compared to i386.
-   Hopefully nobody expects them at a fixed place (Wine?) */
-DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
+	/*
+	 * We need valid kernel segments for data and code in long mode too
+	 * IRET will check the segment types  kkeil 2000/10/28
+	 * Also sysret mandates a special GDT layout
+	 *
+	 * The TLS descriptors are currently at a different place compared to i386.
+	 * Hopefully nobody expects them at a fixed place (Wine?)
+	 */
 	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
 	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
 	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
-} };
 #else
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
@@ -110,9 +122,10 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
 
 	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
-	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
-} };
+	[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
+	GDT_STACK_CANARY_INIT
 #endif
+} };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
 #ifdef CONFIG_X86_32
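[editor's note] The GDT_ENTRY_PERCPU change above turns a null descriptor into 0x0000ffff/0x00cf9200 -- the same flat, writable 4 GiB data segment layout as GDT_ENTRY_KERNEL_DS, so %fs-relative per-cpu accesses work from the start. Decoding those two words by the standard x86 descriptor layout (standalone sketch):

	#include <stdio.h>

	int main(void)
	{
		unsigned int lo = 0x0000ffff, hi = 0x00cf9200;

		printf("limit   = 0x%05x\n", (lo & 0xffff) | (hi & 0x000f0000));
		printf("base    = 0x%08x\n", (lo >> 16) |
		       ((hi & 0xff) << 16) | (hi & 0xff000000));
		printf("access  = 0x%02x\n", (hi >> 8) & 0xff); /* 0x92: present, DPL0, RW data */
		printf("4K gran = %u\n", (hi >> 23) & 1);       /* 1: limit counted in pages */
		return 0;
	}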
@@ -213,6 +226,49 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 #endif
 
 /*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software.  Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+	u32 feature;
+	u32 level;
+};
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+	{ X86_FEATURE_MWAIT,		0x00000005 },
+	{ X86_FEATURE_DCA,		0x00000009 },
+	{ X86_FEATURE_XSAVE,		0x0000000d },
+	{ 0, 0 }
+};
+
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+	const struct cpuid_dependent_feature *df;
+	for (df = cpuid_dependent_features; df->feature; df++) {
+		/*
+		 * Note: cpuid_level is set to -1 if unavailable, but
+		 * extended_extended_level is set to 0 if unavailable
+		 * and the legitimate extended levels are all negative
+		 * when signed; hence the weird messing around with
+		 * signs here...
+		 */
+		if (cpu_has(c, df->feature) &&
+		    ((s32)df->feature < 0 ?
+		     (u32)df->feature > (u32)c->extended_cpuid_level :
+		     (s32)df->feature > (s32)c->cpuid_level)) {
+			clear_cpu_cap(c, df->feature);
+			if (warn)
+				printk(KERN_WARNING
+				       "CPU: CPU feature %s disabled "
+				       "due to lack of CPUID level 0x%x\n",
+				       x86_cap_flags[df->feature],
+				       df->level);
+		}
+	}
+}
+
+/*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
  * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
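[editor's note] One thing worth flagging in filter_cpuid_features() as added above: the comparisons test df->feature where the surrounding comment (and the df->level used in the warning printk) suggest the required CPUID *level* is what should be checked. If that reading is right, the intended condition would be:

	if (cpu_has(c, df->feature) &&
	    ((s32)df->level < 0 ?
	     (u32)df->level > (u32)c->extended_cpuid_level :
	     (s32)df->level > (s32)c->cpuid_level))
		clear_cpu_cap(c, df->feature);

(The signed/unsigned dance exists because extended levels 0x8000xxxx are negative as s32, while a missing basic cpuid_level is stored as -1.)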
@@ -242,18 +298,29 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 
 __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 
+void load_percpu_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+	loadsegment(fs, __KERNEL_PERCPU);
+#else
+	loadsegment(gs, 0);
+	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
+#endif
+	load_stack_canary_segment();
+}
+
 /* Current gdt points %fs at the "master" per-cpu area: after this,
  * it's on the real one. */
-void switch_to_new_gdt(void)
+void switch_to_new_gdt(int cpu)
 {
 	struct desc_ptr gdt_descr;
 
-	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
-#ifdef CONFIG_X86_32
-	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-#endif
+	/* Reload the per-cpu base */
+
+	load_percpu_segment(cpu);
 }
 
 static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
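[editor's note] switch_to_new_gdt() now takes the CPU explicitly and finishes by re-loading the per-cpu segment base through the new load_percpu_segment(): a plain %fs selector reload on 32-bit, but MSR_GS_BASE on 64-bit, since long mode ignores the descriptor base for %gs. The expected caller pattern (a sketch; the actual call sites are updated elsewhere in this series):

	int cpu = smp_processor_id();

	switch_to_new_gdt(cpu);	/* load this CPU's GDT + per-cpu segment base */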
@@ -383,11 +450,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
383 } 450 }
384 451
385 index_msb = get_count_order(smp_num_siblings); 452 index_msb = get_count_order(smp_num_siblings);
386#ifdef CONFIG_X86_64 453 c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
387 c->phys_proc_id = phys_pkg_id(index_msb);
388#else
389 c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
390#endif
391 454
392 smp_num_siblings = smp_num_siblings / c->x86_max_cores; 455 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
393 456
@@ -395,13 +458,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
395 458
396 core_bits = get_count_order(c->x86_max_cores); 459 core_bits = get_count_order(c->x86_max_cores);
397 460
398#ifdef CONFIG_X86_64 461 c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
399 c->cpu_core_id = phys_pkg_id(index_msb) &
400 ((1 << core_bits) - 1);
401#else
402 c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
403 ((1 << core_bits) - 1); 462 ((1 << core_bits) - 1);
404#endif
405 } 463 }
406 464
407out: 465out:
@@ -570,11 +628,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
570 if (this_cpu->c_early_init) 628 if (this_cpu->c_early_init)
571 this_cpu->c_early_init(c); 629 this_cpu->c_early_init(c);
572 630
573 validate_pat_support(c);
574
575#ifdef CONFIG_SMP 631#ifdef CONFIG_SMP
576 c->cpu_index = boot_cpu_id; 632 c->cpu_index = boot_cpu_id;
577#endif 633#endif
634 filter_cpuid_features(c, false);
578} 635}
579 636
580void __init early_cpu_init(void) 637void __init early_cpu_init(void)
@@ -637,7 +694,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_32
 # ifdef CONFIG_X86_HT
-	c->apicid = phys_pkg_id(c->initial_apicid, 0);
+	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 # else
 	c->apicid = c->initial_apicid;
 # endif
@@ -684,7 +741,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 		this_cpu->c_identify(c);
 
 #ifdef CONFIG_X86_64
-	c->apicid = phys_pkg_id(0);
+	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 #endif
 
 	/*
@@ -708,6 +765,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 * we do "generic changes."
 	 */
 
+	/* Filter out anything that depends on CPUID levels we don't have */
+	filter_cpuid_features(c, true);
+
 	/* If the model name is still unset, do table lookup. */
 	if (!c->x86_model_id[0]) {
 		char *p;
@@ -877,54 +937,22 @@ static __init int setup_disablecpuid(char *arg)
 __setup("clearcpuid=", setup_disablecpuid);
 
 #ifdef CONFIG_X86_64
-struct x8664_pda **_cpu_pda __read_mostly;
-EXPORT_SYMBOL(_cpu_pda);
-
 struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 
-static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
-
-void __cpuinit pda_init(int cpu)
-{
-	struct x8664_pda *pda = cpu_pda(cpu);
+DEFINE_PER_CPU_FIRST(union irq_stack_union,
+		     irq_stack_union) __aligned(PAGE_SIZE);
+DEFINE_PER_CPU(char *, irq_stack_ptr) =
+	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
-	/* Setup up data that may be needed in __get_free_pages early */
-	loadsegment(fs, 0);
-	loadsegment(gs, 0);
-	/* Memory clobbers used to order PDA accessed */
-	mb();
-	wrmsrl(MSR_GS_BASE, pda);
-	mb();
-
-	pda->cpunumber = cpu;
-	pda->irqcount = -1;
-	pda->kernelstack = (unsigned long)stack_thread_info() -
-				PDA_STACKOFFSET + THREAD_SIZE;
-	pda->active_mm = &init_mm;
-	pda->mmu_state = 0;
-
-	if (cpu == 0) {
-		/* others are initialized in smpboot.c */
-		pda->pcurrent = &init_task;
-		pda->irqstackptr = boot_cpu_stack;
-		pda->irqstackptr += IRQSTACKSIZE - 64;
-	} else {
-		if (!pda->irqstackptr) {
-			pda->irqstackptr = (char *)
-				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-			if (!pda->irqstackptr)
-				panic("cannot allocate irqstack for cpu %d",
-				      cpu);
-			pda->irqstackptr += IRQSTACKSIZE - 64;
-		}
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
-		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
-			pda->nodenumber = cpu_to_node(cpu);
-	}
-}
+DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
-static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
-				  DEBUG_STKSZ] __page_aligned_bss;
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
+	__aligned(PAGE_SIZE);
 
 extern asmlinkage void ignore_sysret(void);
 
@@ -957,16 +985,21 @@ unsigned long kernel_eflags;
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
-#else
+#else	/* x86_64 */
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+DEFINE_PER_CPU(unsigned long, stack_canary);
+#endif
 
-/* Make sure %fs is initialized properly in idle threads */
+/* Make sure %fs and %gs are initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
 	memset(regs, 0, sizeof(struct pt_regs));
 	regs->fs = __KERNEL_PERCPU;
+	regs->gs = __KERNEL_STACK_CANARY;
 	return regs;
 }
-#endif
+#endif	/* x86_64 */
 
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
@@ -982,15 +1015,14 @@ void __cpuinit cpu_init(void)
 	struct tss_struct *t = &per_cpu(init_tss, cpu);
 	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
 	unsigned long v;
-	char *estacks = NULL;
 	struct task_struct *me;
 	int i;
 
-	/* CPU 0 is initialised in head64.c */
-	if (cpu != 0)
-		pda_init(cpu);
-	else
-		estacks = boot_exception_stacks;
+#ifdef CONFIG_NUMA
+	if (cpu != 0 && percpu_read(node_number) == 0 &&
+	    cpu_to_node(cpu) != NUMA_NO_NODE)
+		percpu_write(node_number, cpu_to_node(cpu));
+#endif
 
 	me = current;
 
@@ -1006,7 +1038,9 @@ void __cpuinit cpu_init(void)
 	 * and set up the GDT descriptor:
 	 */
 
-	switch_to_new_gdt();
+	switch_to_new_gdt(cpu);
+	loadsegment(fs, 0);
+
 	load_idt((const struct desc_ptr *)&idt_descr);
 
 	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
@@ -1024,18 +1058,13 @@ void __cpuinit cpu_init(void)
 	 * set up and load the per-CPU TSS
 	 */
 	if (!orig_ist->ist[0]) {
-		static const unsigned int order[N_EXCEPTION_STACKS] = {
-		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
-		  [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
+		static const unsigned int sizes[N_EXCEPTION_STACKS] = {
+		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
+		  [DEBUG_STACK - 1] = DEBUG_STKSZ
 		};
+		char *estacks = per_cpu(exception_stacks, cpu);
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-			if (cpu) {
-				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-				if (!estacks)
-					panic("Cannot allocate exception "
-					      "stack %ld %d\n", v, cpu);
-			}
-			estacks += PAGE_SIZE << order[v];
+			estacks += sizes[v];
 			orig_ist->ist[v] = t->x86_tss.ist[v] =
 					(unsigned long)estacks;
 		}
@@ -1069,22 +1098,19 @@ void __cpuinit cpu_init(void)
 	 */
 	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
 		arch_kgdb_ops.correct_hw_break();
-	else {
+	else
 #endif
-	/*
-	 * Clear all 6 debug registers:
-	 */
-
+	{
+		/*
+		 * Clear all 6 debug registers:
+		 */
 		set_debugreg(0UL, 0);
 		set_debugreg(0UL, 1);
 		set_debugreg(0UL, 2);
 		set_debugreg(0UL, 3);
 		set_debugreg(0UL, 6);
 		set_debugreg(0UL, 7);
-#ifdef CONFIG_KGDB
-	/* If the kgdb is connected no debug regs should be altered. */
 	}
-#endif
 
 	fpu_init();
 
@@ -1114,7 +1140,7 @@ void __cpuinit cpu_init(void)
 	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
 	load_idt(&idt_descr);
-	switch_to_new_gdt();
+	switch_to_new_gdt(cpu);
 
 	/*
 	 * Set up and load the per-CPU TSS and LDT
@@ -1135,9 +1161,6 @@ void __cpuinit cpu_init(void)
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-	/* Clear %gs. */
-	asm volatile ("mov %0, %%gs" : : "r" (0));
-
 	/* Clear all 6 debug registers: */
 	set_debugreg(0, 0);
 	set_debugreg(0, 1);
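
Worth noting on the cpu_init() hunks above: exception stacks are no longer allocated with __get_free_pages(); fixed-size slices are carved out of one statically sized per-CPU array, with each IST entry pointing at the top of its slice. A self-contained sketch of that carving loop, with hypothetical sizes and names (the range-designator initializer is a GCC extension, as in the kernel):

#include <stdio.h>

#define N_EXCEPTION_STACKS 4		/* hypothetical count */
#define EXCEPTION_STKSZ    4096		/* hypothetical sizes */
#define DEBUG_STKSZ        8192
#define DEBUG_STACK        4		/* 1-based index of the debug stack */

int main(void)
{
	static const unsigned int sizes[N_EXCEPTION_STACKS] = {
		[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
		[DEBUG_STACK - 1] = DEBUG_STKSZ
	};
	static char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ
				     + DEBUG_STKSZ];
	char *estacks = exception_stacks;
	unsigned long ist[N_EXCEPTION_STACKS];
	int v;

	/* each IST entry points at the *top* of its slice */
	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
		estacks += sizes[v];
		ist[v] = (unsigned long)estacks;
	}
	for (v = 0; v < N_EXCEPTION_STACKS; v++)
		printf("stack %d top at offset %lu\n", v,
		       ist[v] - (unsigned long)exception_stacks);
	return 0;
}
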
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 24ff26a38ade..1f137a87d4bd 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -24,7 +24,7 @@
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
-#include <mach_apic.h>
+#include <asm/genapic.h>
 #endif
 
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
@@ -63,6 +63,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 	}
 
+	/*
+	 * There is a known erratum on Pentium III and Core Solo
+	 * and Core Duo CPUs.
+	 * " Page with PAT set to WC while associated MTRR is UC
+	 *   may consolidate to UC "
+	 * Because of this erratum, it is better to stick with
+	 * setting WC in MTRR rather than using PAT on these CPUs.
+	 *
+	 * Enable PAT WC only on P4, Core 2 or later CPUs.
+	 */
+	if (c->x86 == 6 && c->x86_model < 15)
+		clear_cpu_cap(c, X86_FEATURE_PAT);
 }
 
 #ifdef CONFIG_X86_32
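
The erratum gate above keys purely off the CPUID family/model pair. For reference, a user-space sketch of the same check using GCC's <cpuid.h> (the family-6/model-15 thresholds mirror the hunk; everything else here is illustrative, not kernel code):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	unsigned int family = (eax >> 8) & 0xf;
	unsigned int model  = (eax >> 4) & 0xf;

	/* extended fields apply for family 6/15, as the kernel decodes them */
	if (family == 0xf)
		family += (eax >> 20) & 0xff;
	if (family == 0x6 || family == 0xf)
		model += ((eax >> 16) & 0xf) << 4;

	printf("family %u model %u: %s\n", family, model,
	       (family == 6 && model < 15) ? "avoid PAT (erratum)" : "PAT ok");
	return 0;
}
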
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index da299eb85fc0..7293508d8f5c 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -147,7 +147,16 @@ struct _cpuid4_info {
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
 	unsigned long can_disable;
-	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
+	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+};
+
+/* subset of above _cpuid4_info w/o shared_cpu_map */
+struct _cpuid4_info_regs {
+	union _cpuid4_leaf_eax eax;
+	union _cpuid4_leaf_ebx ebx;
+	union _cpuid4_leaf_ecx ecx;
+	unsigned long size;
+	unsigned long can_disable;
 };
 
 #ifdef CONFIG_PCI
@@ -278,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 }
 
 static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 {
 	if (index < 3)
 		return;
@@ -286,7 +295,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
 }
 
 static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+__cpuinit cpuid4_cache_lookup_regs(int index,
+				   struct _cpuid4_info_regs *this_leaf)
 {
 	union _cpuid4_leaf_eax eax;
 	union _cpuid4_leaf_ebx ebx;
@@ -314,6 +324,15 @@ __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 	return 0;
 }
 
+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+{
+	struct _cpuid4_info_regs *leaf_regs =
+		(struct _cpuid4_info_regs *)this_leaf;
+
+	return cpuid4_cache_lookup_regs(index, leaf_regs);
+}
+
 static int __cpuinit find_num_cache_leaves(void)
 {
 	unsigned int eax, ebx, ecx, edx;
@@ -353,11 +372,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	 * parameters cpuid leaf to find the cache details
 	 */
 	for (i = 0; i < num_cache_leaves; i++) {
-		struct _cpuid4_info this_leaf;
-
+		struct _cpuid4_info_regs this_leaf;
 		int retval;
 
-		retval = cpuid4_cache_lookup(i, &this_leaf);
+		retval = cpuid4_cache_lookup_regs(i, &this_leaf);
 		if (retval >= 0) {
 			switch(this_leaf.eax.split.level) {
 			case 1:
@@ -506,17 +524,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
 
 	if (num_threads_sharing == 1)
-		cpu_set(cpu, this_leaf->shared_cpu_map);
+		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
 	else {
 		index_msb = get_count_order(num_threads_sharing);
 
 		for_each_online_cpu(i) {
 			if (cpu_data(i).apicid >> index_msb ==
 			    c->apicid >> index_msb) {
-				cpu_set(i, this_leaf->shared_cpu_map);
+				cpumask_set_cpu(i,
+					to_cpumask(this_leaf->shared_cpu_map));
 				if (i != cpu && per_cpu(cpuid4_info, i)) {
-					sibling_leaf = CPUID4_INFO_IDX(i, index);
-					cpu_set(cpu, sibling_leaf->shared_cpu_map);
+					sibling_leaf =
+						CPUID4_INFO_IDX(i, index);
+					cpumask_set_cpu(cpu, to_cpumask(
+						sibling_leaf->shared_cpu_map));
 				}
 			}
 		}
@@ -528,9 +549,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 	int sibling;
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
-	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
+	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
 		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
-		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+		cpumask_clear_cpu(cpu,
+			to_cpumask(sibling_leaf->shared_cpu_map));
 	}
 }
 #else
@@ -635,8 +657,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 	int n = 0;
 
 	if (len > 1) {
-		cpumask_t *mask = &this_leaf->shared_cpu_map;
+		const struct cpumask *mask;
 
+		mask = to_cpumask(this_leaf->shared_cpu_map);
 		n = type?
 			cpulist_scnprintf(buf, len-2, mask) :
 			cpumask_scnprintf(buf, len-2, mask);
@@ -699,7 +722,8 @@ static struct pci_dev *get_k8_northbridge(int node)
 
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
 {
-	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+	int node = cpu_to_node(cpumask_first(mask));
 	struct pci_dev *dev = NULL;
 	ssize_t ret = 0;
 	int i;
@@ -733,7 +757,8 @@ static ssize_t
 store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
 		    size_t count)
 {
-	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+	int node = cpu_to_node(cpumask_first(mask));
 	struct pci_dev *dev = NULL;
 	unsigned int ret, index, val;
 
@@ -878,7 +903,7 @@ err_out:
 	return -ENOMEM;
 }
 
-static cpumask_t cache_dev_map = CPU_MASK_NONE;
+static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
 
 /* Add/Remove cache interface for CPU device */
 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
@@ -918,7 +943,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		}
 		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
 	}
-	cpu_set(cpu, cache_dev_map);
+	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
 
 	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
 	return 0;
@@ -931,9 +956,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
 	if (per_cpu(cpuid4_info, cpu) == NULL)
 		return;
-	if (!cpu_isset(cpu, cache_dev_map))
+	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
 		return;
-	cpu_clear(cpu, cache_dev_map);
+	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
 
 	for (i = 0; i < num_cache_leaves; i++)
 		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
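
The intel_cacheinfo.c hunks replace embedded cpumask_t values with raw bitmaps plus to_cpumask() conversions, so all accesses go through the struct-cpumask API. A rough user-space analogue of DECLARE_BITMAP() and the set/test calls — the helper names here are illustrative, not the kernel API:

#include <stdio.h>
#include <limits.h>

#define NR_CPUS 64
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

static void bitmap_set_bit(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static int bitmap_test_bit(const unsigned long *map, unsigned int bit)
{
	return (map[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG)) & 1;
}

int main(void)
{
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS) = { 0 };

	bitmap_set_bit(shared_cpu_map, 3);
	printf("cpu3 shared: %d, cpu4 shared: %d\n",
	       bitmap_test_bit(shared_cpu_map, 3),
	       bitmap_test_bit(shared_cpu_map, 4));
	return 0;
}
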
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 8ae8c4ff094d..4772e91e8246 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = {
 struct threshold_bank {
 	struct kobject *kobj;
 	struct threshold_block *blocks;
-	cpumask_t cpus;
+	cpumask_var_t cpus;
 };
 static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
 
@@ -481,7 +481,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 #ifdef CONFIG_SMP
 	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
-		i = first_cpu(per_cpu(cpu_core_map, cpu));
+		i = cpumask_first(&per_cpu(cpu_core_map, cpu));
 
 		/* first core not up yet */
 		if (cpu_data(i).cpu_core_id)
@@ -501,7 +501,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		if (err)
 			goto out;
 
-		b->cpus = per_cpu(cpu_core_map, cpu);
+		cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
 		per_cpu(threshold_banks, cpu)[bank] = b;
 		goto out;
 	}
@@ -512,15 +512,20 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		err = -ENOMEM;
 		goto out;
 	}
+	if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+		kfree(b);
+		err = -ENOMEM;
+		goto out;
+	}
 
 	b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
 	if (!b->kobj)
 		goto out_free;
 
 #ifndef CONFIG_SMP
-	b->cpus = CPU_MASK_ALL;
+	cpumask_setall(b->cpus);
 #else
-	b->cpus = per_cpu(cpu_core_map, cpu);
+	cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
 #endif
 
 	per_cpu(threshold_banks, cpu)[bank] = b;
@@ -529,7 +534,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	if (err)
 		goto out_free;
 
-	for_each_cpu_mask_nr(i, b->cpus) {
+	for_each_cpu(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
@@ -545,6 +550,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 out_free:
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
+	free_cpumask_var(b->cpus);
 	kfree(b);
 out:
 	return err;
@@ -619,7 +625,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #endif
 
 	/* remove all sibling symlinks before unregistering */
-	for_each_cpu_mask_nr(i, b->cpus) {
+	for_each_cpu(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
@@ -632,6 +638,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 free_out:
 	kobject_del(b->kobj);
 	kobject_put(b->kobj);
+	free_cpumask_var(b->cpus);
 	kfree(b);
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
 }
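
The mce_amd_64.c hunks swap an embedded cpumask_t for cpumask_var_t, which must be allocated explicitly and freed on every exit path (both the error unwind and teardown). A hedged plain-C analogue of that own-your-mask lifecycle, with invented names standing in for alloc_cpumask_var()/free_cpumask_var():

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct bank {
	unsigned long *cpus;	/* analogue of cpumask_var_t */
};

static struct bank *bank_create(const unsigned long *core_map, size_t words)
{
	struct bank *b = calloc(1, sizeof(*b));
	if (!b)
		return NULL;
	b->cpus = calloc(words, sizeof(unsigned long));
	if (!b->cpus) {		/* mirror the alloc_cpumask_var() unwind */
		free(b);
		return NULL;
	}
	memcpy(b->cpus, core_map, words * sizeof(unsigned long));
	return b;
}

static void bank_destroy(struct bank *b)
{
	if (!b)
		return;
	free(b->cpus);		/* mirror free_cpumask_var() before kfree() */
	free(b);
}

int main(void)
{
	unsigned long core_map[1] = { 0x3 };	/* hypothetical core siblings */
	struct bank *b = bank_create(core_map, 1);

	printf("bank %screated\n", b ? "" : "not ");
	bank_destroy(b);
	return 0;
}
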
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index 4b48f251fd39..5e8c79e748a6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -7,6 +7,7 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <asm/processor.h>
+#include <asm/apic.h>
 #include <asm/msr.h>
 #include <asm/mce.h>
 #include <asm/hw_irq.h>
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index c689d19e35ab..ad7f2a696f4a 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -24,11 +24,11 @@
 #include <asm/apic.h>
 #include <asm/hpet.h>
 #include <linux/kdebug.h>
-#include <asm/smp.h>
+#include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
 
-#include <mach_ipi.h>
+#include <asm/genapic.h>
 
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 6b1f6f6f8661..87d103ded1c3 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -99,7 +99,7 @@ print_context_stack(struct thread_info *tinfo,
 			frame = frame->next_frame;
 			bp = (unsigned long) frame;
 		} else {
-			ops->address(data, addr, bp == 0);
+			ops->address(data, addr, 0);
 		}
 		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
 	}
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index c302d0707048..d35db5993fd6 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -106,7 +106,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		const struct stacktrace_ops *ops, void *data)
 {
 	const unsigned cpu = get_cpu();
-	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
+	unsigned long *irq_stack_end =
+		(unsigned long *)per_cpu(irq_stack_ptr, cpu);
 	unsigned used = 0;
 	struct thread_info *tinfo;
 	int graph = 0;
@@ -160,23 +161,23 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 			stack = (unsigned long *) estack_end[-2];
 			continue;
 		}
-		if (irqstack_end) {
-			unsigned long *irqstack;
-			irqstack = irqstack_end -
-				(IRQSTACKSIZE - 64) / sizeof(*irqstack);
+		if (irq_stack_end) {
+			unsigned long *irq_stack;
+			irq_stack = irq_stack_end -
+				(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
 
-			if (stack >= irqstack && stack < irqstack_end) {
+			if (stack >= irq_stack && stack < irq_stack_end) {
 				if (ops->stack(data, "IRQ") < 0)
 					break;
 				bp = print_context_stack(tinfo, stack, bp,
-					ops, data, irqstack_end, &graph);
+					ops, data, irq_stack_end, &graph);
 				/*
 				 * We link to the next stack (which would be
 				 * the process stack normally) the last
 				 * pointer (index -1 to end) in the IRQ stack:
 				 */
-				stack = (unsigned long *) (irqstack_end[-1]);
-				irqstack_end = NULL;
+				stack = (unsigned long *) (irq_stack_end[-1]);
+				irq_stack_end = NULL;
 				ops->stack(data, "EOI");
 				continue;
 			}
@@ -199,10 +200,10 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 	unsigned long *stack;
 	int i;
 	const int cpu = smp_processor_id();
-	unsigned long *irqstack_end =
-		(unsigned long *) (cpu_pda(cpu)->irqstackptr);
-	unsigned long *irqstack =
-		(unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
+	unsigned long *irq_stack_end =
+		(unsigned long *)(per_cpu(irq_stack_ptr, cpu));
+	unsigned long *irq_stack =
+		(unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);
 
 	/*
 	 * debugging aid: "show_stack(NULL, NULL);" prints the
@@ -218,9 +219,9 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 
 	stack = sp;
 	for (i = 0; i < kstack_depth_to_print; i++) {
-		if (stack >= irqstack && stack <= irqstack_end) {
-			if (stack == irqstack_end) {
-				stack = (unsigned long *) (irqstack_end[-1]);
+		if (stack >= irq_stack && stack <= irq_stack_end) {
+			if (stack == irq_stack_end) {
+				stack = (unsigned long *) (irq_stack_end[-1]);
 				printk(" <EOI> ");
 			}
 		} else {
@@ -241,7 +242,7 @@ void show_registers(struct pt_regs *regs)
 	int i;
 	unsigned long sp;
 	const int cpu = smp_processor_id();
-	struct task_struct *cur = cpu_pda(cpu)->pcurrent;
+	struct task_struct *cur = current;
 
 	sp = regs->sp;
 	printk("CPU %d ", cpu);
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 504ad198e4ad..639ad98238a2 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -13,8 +13,8 @@
 #include <asm/setup.h>
 #include <xen/hvc-console.h>
 #include <asm/pci-direct.h>
-#include <asm/pgtable.h>
 #include <asm/fixmap.h>
+#include <asm/pgtable.h>
 #include <linux/usb/ehci_def.h>
 
 /* Simple VGA output */
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 1119d247fe11..b205272ad394 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -366,10 +366,12 @@ void __init efi_init(void)
 			   SMBIOS_TABLE_GUID)) {
 			efi.smbios = config_tables[i].table;
 			printk(" SMBIOS=0x%lx ", config_tables[i].table);
+#ifdef CONFIG_X86_UV
 		} else if (!efi_guidcmp(config_tables[i].guid,
 					UV_SYSTEM_TABLE_GUID)) {
 			efi.uv_systab = config_tables[i].table;
 			printk(" UVsystab=0x%lx ", config_tables[i].table);
+#endif
 		} else if (!efi_guidcmp(config_tables[i].guid,
 					HCDP_TABLE_GUID)) {
 			efi.hcdp = config_tables[i].table;
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
index 652c5287215f..a4ee29127fdf 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/kernel/efi_64.c
@@ -36,6 +36,7 @@
 #include <asm/proto.h>
 #include <asm/efi.h>
 #include <asm/cacheflush.h>
+#include <asm/fixmap.h>
 
 static pgd_t save_pgd __initdata;
 static unsigned long efi_flags __initdata;
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 46469029e9d3..e99206831459 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -30,12 +30,13 @@
  * 1C(%esp) - %ds
  * 20(%esp) - %es
  * 24(%esp) - %fs
- * 28(%esp) - orig_eax
- * 2C(%esp) - %eip
- * 30(%esp) - %cs
- * 34(%esp) - %eflags
- * 38(%esp) - %oldesp
- * 3C(%esp) - %oldss
+ * 28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
+ * 2C(%esp) - orig_eax
+ * 30(%esp) - %eip
+ * 34(%esp) - %cs
+ * 38(%esp) - %eflags
+ * 3C(%esp) - %oldesp
+ * 40(%esp) - %oldss
  *
  * "current" is in register %ebx during any slow entries.
  */
@@ -101,121 +102,221 @@
 #define resume_userspace_sig resume_userspace
 #endif
 
-#define SAVE_ALL \
-	cld; \
-	pushl %fs; \
-	CFI_ADJUST_CFA_OFFSET 4;\
-	/*CFI_REL_OFFSET fs, 0;*/\
-	pushl %es; \
-	CFI_ADJUST_CFA_OFFSET 4;\
-	/*CFI_REL_OFFSET es, 0;*/\
-	pushl %ds; \
-	CFI_ADJUST_CFA_OFFSET 4;\
-	/*CFI_REL_OFFSET ds, 0;*/\
-	pushl %eax; \
-	CFI_ADJUST_CFA_OFFSET 4;\
-	CFI_REL_OFFSET eax, 0;\
-	pushl %ebp; \
-	CFI_ADJUST_CFA_OFFSET 4;\
-	CFI_REL_OFFSET ebp, 0;\
-	pushl %edi; \
-	CFI_ADJUST_CFA_OFFSET 4;\
-	CFI_REL_OFFSET edi, 0;\
-	pushl %esi; \
-	CFI_ADJUST_CFA_OFFSET 4;\
-	CFI_REL_OFFSET esi, 0;\
-	pushl %edx; \
-	CFI_ADJUST_CFA_OFFSET 4;\
-	CFI_REL_OFFSET edx, 0;\
-	pushl %ecx; \
-	CFI_ADJUST_CFA_OFFSET 4;\
-	CFI_REL_OFFSET ecx, 0;\
-	pushl %ebx; \
-	CFI_ADJUST_CFA_OFFSET 4;\
-	CFI_REL_OFFSET ebx, 0;\
-	movl $(__USER_DS), %edx; \
-	movl %edx, %ds; \
-	movl %edx, %es; \
-	movl $(__KERNEL_PERCPU), %edx; \
+/*
+ * User gs save/restore
+ *
+ * %gs is used for userland TLS and kernel only uses it for stack
+ * canary which is required to be at %gs:20 by gcc.  Read the comment
+ * at the top of stackprotector.h for more info.
+ *
+ * Local labels 98 and 99 are used.
+ */
+#ifdef CONFIG_X86_32_LAZY_GS
+
+ /* unfortunately push/pop can't be no-op */
+.macro PUSH_GS
+	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
+.endm
+.macro POP_GS pop=0
+	addl $(4 + \pop), %esp
+	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
+.endm
+.macro POP_GS_EX
+.endm
+
+ /* all the rest are no-op */
+.macro PTGS_TO_GS
+.endm
+.macro PTGS_TO_GS_EX
+.endm
+.macro GS_TO_REG reg
+.endm
+.macro REG_TO_PTGS reg
+.endm
+.macro SET_KERNEL_GS reg
+.endm
+
+#else	/* CONFIG_X86_32_LAZY_GS */
+
+.macro PUSH_GS
+	pushl %gs
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET gs, 0*/
+.endm
+
+.macro POP_GS pop=0
+98:	popl %gs
+	CFI_ADJUST_CFA_OFFSET -4
+	/*CFI_RESTORE gs*/
+  .if \pop <> 0
+	add $\pop, %esp
+	CFI_ADJUST_CFA_OFFSET -\pop
+  .endif
+.endm
+.macro POP_GS_EX
+.pushsection .fixup, "ax"
+99:	movl $0, (%esp)
+	jmp 98b
+.section __ex_table, "a"
+	.align 4
+	.long 98b, 99b
+.popsection
+.endm
+
+.macro PTGS_TO_GS
+98:	mov PT_GS(%esp), %gs
+.endm
+.macro PTGS_TO_GS_EX
+.pushsection .fixup, "ax"
+99:	movl $0, PT_GS(%esp)
+	jmp 98b
+.section __ex_table, "a"
+	.align 4
+	.long 98b, 99b
+.popsection
+.endm
+
+.macro GS_TO_REG reg
+	movl %gs, \reg
+	/*CFI_REGISTER gs, \reg*/
+.endm
+.macro REG_TO_PTGS reg
+	movl \reg, PT_GS(%esp)
+	/*CFI_REL_OFFSET gs, PT_GS*/
+.endm
+.macro SET_KERNEL_GS reg
+	movl $(__KERNEL_STACK_CANARY), \reg
+	movl \reg, %gs
+.endm
+
+#endif	/* CONFIG_X86_32_LAZY_GS */
+
+.macro SAVE_ALL
+	cld
+	PUSH_GS
+	pushl %fs
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET fs, 0;*/
+	pushl %es
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET es, 0;*/
+	pushl %ds
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET ds, 0;*/
+	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET eax, 0
+	pushl %ebp
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ebp, 0
+	pushl %edi
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET edi, 0
+	pushl %esi
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET esi, 0
+	pushl %edx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET edx, 0
+	pushl %ecx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ecx, 0
+	pushl %ebx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ebx, 0
+	movl $(__USER_DS), %edx
+	movl %edx, %ds
+	movl %edx, %es
+	movl $(__KERNEL_PERCPU), %edx
 	movl %edx, %fs
+	SET_KERNEL_GS %edx
+.endm
 
-#define RESTORE_INT_REGS \
-	popl %ebx; \
-	CFI_ADJUST_CFA_OFFSET -4;\
-	CFI_RESTORE ebx;\
-	popl %ecx; \
-	CFI_ADJUST_CFA_OFFSET -4;\
-	CFI_RESTORE ecx;\
-	popl %edx; \
-	CFI_ADJUST_CFA_OFFSET -4;\
-	CFI_RESTORE edx;\
-	popl %esi; \
-	CFI_ADJUST_CFA_OFFSET -4;\
-	CFI_RESTORE esi;\
-	popl %edi; \
-	CFI_ADJUST_CFA_OFFSET -4;\
-	CFI_RESTORE edi;\
-	popl %ebp; \
-	CFI_ADJUST_CFA_OFFSET -4;\
-	CFI_RESTORE ebp;\
-	popl %eax; \
-	CFI_ADJUST_CFA_OFFSET -4;\
+.macro RESTORE_INT_REGS
+	popl %ebx
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE ebx
+	popl %ecx
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE ecx
+	popl %edx
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE edx
+	popl %esi
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE esi
+	popl %edi
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE edi
+	popl %ebp
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE ebp
+	popl %eax
+	CFI_ADJUST_CFA_OFFSET -4
 	CFI_RESTORE eax
+.endm
 
-#define RESTORE_REGS \
-	RESTORE_INT_REGS; \
-1:	popl %ds; \
-	CFI_ADJUST_CFA_OFFSET -4;\
-	/*CFI_RESTORE ds;*/\
-2:	popl %es; \
-	CFI_ADJUST_CFA_OFFSET -4;\
-	/*CFI_RESTORE es;*/\
-3:	popl %fs; \
-	CFI_ADJUST_CFA_OFFSET -4;\
-	/*CFI_RESTORE fs;*/\
-.pushsection .fixup,"ax"; \
-4:	movl $0,(%esp); \
-	jmp 1b; \
-5:	movl $0,(%esp); \
-	jmp 2b; \
-6:	movl $0,(%esp); \
-	jmp 3b; \
-.section __ex_table,"a";\
-	.align 4; \
-	.long 1b,4b; \
-	.long 2b,5b; \
-	.long 3b,6b; \
+.macro RESTORE_REGS pop=0
+	RESTORE_INT_REGS
+1:	popl %ds
+	CFI_ADJUST_CFA_OFFSET -4
+	/*CFI_RESTORE ds;*/
+2:	popl %es
+	CFI_ADJUST_CFA_OFFSET -4
+	/*CFI_RESTORE es;*/
+3:	popl %fs
+	CFI_ADJUST_CFA_OFFSET -4
+	/*CFI_RESTORE fs;*/
+	POP_GS \pop
+.pushsection .fixup, "ax"
+4:	movl $0, (%esp)
+	jmp 1b
+5:	movl $0, (%esp)
+	jmp 2b
+6:	movl $0, (%esp)
+	jmp 3b
+.section __ex_table, "a"
+	.align 4
+	.long 1b, 4b
+	.long 2b, 5b
+	.long 3b, 6b
 .popsection
+	POP_GS_EX
+.endm
 
-#define RING0_INT_FRAME \
-	CFI_STARTPROC simple;\
-	CFI_SIGNAL_FRAME;\
-	CFI_DEF_CFA esp, 3*4;\
-	/*CFI_OFFSET cs, -2*4;*/\
+.macro RING0_INT_FRAME
+	CFI_STARTPROC simple
+	CFI_SIGNAL_FRAME
+	CFI_DEF_CFA esp, 3*4
+	/*CFI_OFFSET cs, -2*4;*/
 	CFI_OFFSET eip, -3*4
+.endm
 
-#define RING0_EC_FRAME \
-	CFI_STARTPROC simple;\
-	CFI_SIGNAL_FRAME;\
-	CFI_DEF_CFA esp, 4*4;\
-	/*CFI_OFFSET cs, -2*4;*/\
+.macro RING0_EC_FRAME
+	CFI_STARTPROC simple
+	CFI_SIGNAL_FRAME
+	CFI_DEF_CFA esp, 4*4
+	/*CFI_OFFSET cs, -2*4;*/
 	CFI_OFFSET eip, -3*4
+.endm
 
-#define RING0_PTREGS_FRAME \
-	CFI_STARTPROC simple;\
-	CFI_SIGNAL_FRAME;\
-	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
-	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
-	CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
-	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
-	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
-	CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
-	CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
-	CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
-	CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
-	CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
-	CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
+.macro RING0_PTREGS_FRAME
+	CFI_STARTPROC simple
+	CFI_SIGNAL_FRAME
+	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
+	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
+	CFI_OFFSET eip, PT_EIP-PT_OLDESP
+	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
+	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
+	CFI_OFFSET eax, PT_EAX-PT_OLDESP
+	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
+	CFI_OFFSET edi, PT_EDI-PT_OLDESP
+	CFI_OFFSET esi, PT_ESI-PT_OLDESP
+	CFI_OFFSET edx, PT_EDX-PT_OLDESP
+	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
 	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
+.endm
 
 ENTRY(ret_from_fork)
 	CFI_STARTPROC
@@ -362,6 +463,7 @@ sysenter_exit:
 	xorl %ebp,%ebp
 	TRACE_IRQS_ON
 1:	mov  PT_FS(%esp), %fs
+	PTGS_TO_GS
 	ENABLE_INTERRUPTS_SYSEXIT
 
 #ifdef CONFIG_AUDITSYSCALL
@@ -410,6 +512,7 @@ sysexit_audit:
 	.align 4
 	.long 1b,2b
 .popsection
+	PTGS_TO_GS_EX
 ENDPROC(ia32_sysenter_target)
 
 	# system call handler stub
@@ -452,8 +555,7 @@ restore_all:
 restore_nocheck:
 	TRACE_IRQS_IRET
 restore_nocheck_notrace:
-	RESTORE_REGS
-	addl $4, %esp			# skip orig_eax/error_code
+	RESTORE_REGS 4			# skip orig_eax/error_code
 	CFI_ADJUST_CFA_OFFSET -4
 irq_return:
 	INTERRUPT_RETURN
@@ -595,28 +697,50 @@ syscall_badsys:
 END(syscall_badsys)
 	CFI_ENDPROC
 
-#define FIXUP_ESPFIX_STACK \
-	/* since we are on a wrong stack, we cant make it a C code :( */ \
-	PER_CPU(gdt_page, %ebx); \
-	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
-	addl %esp, %eax; \
-	pushl $__KERNEL_DS; \
-	CFI_ADJUST_CFA_OFFSET 4; \
-	pushl %eax; \
-	CFI_ADJUST_CFA_OFFSET 4; \
-	lss (%esp), %esp; \
-	CFI_ADJUST_CFA_OFFSET -8;
-#define UNWIND_ESPFIX_STACK \
-	movl %ss, %eax; \
-	/* see if on espfix stack */ \
-	cmpw $__ESPFIX_SS, %ax; \
-	jne 27f; \
-	movl $__KERNEL_DS, %eax; \
-	movl %eax, %ds; \
-	movl %eax, %es; \
-	/* switch to normal stack */ \
-	FIXUP_ESPFIX_STACK; \
-27:;
+/*
+ * System calls that need a pt_regs pointer.
+ */
+#define PTREGSCALL(name) \
+	ALIGN; \
+ptregs_##name: \
+	leal 4(%esp),%eax; \
+	jmp sys_##name;
+
+PTREGSCALL(iopl)
+PTREGSCALL(fork)
+PTREGSCALL(clone)
+PTREGSCALL(vfork)
+PTREGSCALL(execve)
+PTREGSCALL(sigaltstack)
+PTREGSCALL(sigreturn)
+PTREGSCALL(rt_sigreturn)
+PTREGSCALL(vm86)
+PTREGSCALL(vm86old)
+
+.macro FIXUP_ESPFIX_STACK
+	/* since we are on a wrong stack, we cant make it a C code :( */
+	PER_CPU(gdt_page, %ebx)
+	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
+	addl %esp, %eax
+	pushl $__KERNEL_DS
+	CFI_ADJUST_CFA_OFFSET 4
+	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
+	lss (%esp), %esp
+	CFI_ADJUST_CFA_OFFSET -8
+.endm
+.macro UNWIND_ESPFIX_STACK
+	movl %ss, %eax
+	/* see if on espfix stack */
+	cmpw $__ESPFIX_SS, %ax
+	jne 27f
+	movl $__KERNEL_DS, %eax
+	movl %eax, %ds
+	movl %eax, %es
+	/* switch to normal stack */
+	FIXUP_ESPFIX_STACK
+27:
+.endm
 
 /*
  * Build the entry stubs and pointer table with some assembler magic.
@@ -672,7 +796,7 @@ common_interrupt:
 ENDPROC(common_interrupt)
 	CFI_ENDPROC
 
-#define BUILD_INTERRUPT(name, nr)	\
+#define BUILD_INTERRUPT3(name, nr, fn)	\
 ENTRY(name)				\
 	RING0_INT_FRAME;		\
 	pushl $~(nr);			\
@@ -680,13 +804,15 @@ ENTRY(name) \
 	SAVE_ALL;			\
 	TRACE_IRQS_OFF			\
 	movl %esp,%eax;			\
-	call smp_##name;		\
+	call fn;			\
 	jmp ret_from_intr;		\
 	CFI_ENDPROC;			\
 ENDPROC(name)
 
+#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)
+
 /* The include is where all of the SMP etc. interrupts come from */
-#include "entry_arch.h"
+#include <asm/entry_arch.h>
 
 ENTRY(coprocessor_error)
 	RING0_INT_FRAME
@@ -1068,7 +1194,10 @@ ENTRY(page_fault)
 	CFI_ADJUST_CFA_OFFSET 4
 	ALIGN
 error_code:
-	/* the function address is in %fs's slot on the stack */
+	/* the function address is in %gs's slot on the stack */
+	pushl %fs
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET fs, 0*/
 	pushl %es
 	CFI_ADJUST_CFA_OFFSET 4
 	/*CFI_REL_OFFSET es, 0*/
@@ -1097,20 +1226,15 @@ error_code:
 	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET ebx, 0
 	cld
-	pushl %fs
-	CFI_ADJUST_CFA_OFFSET 4
-	/*CFI_REL_OFFSET fs, 0*/
 	movl $(__KERNEL_PERCPU), %ecx
 	movl %ecx, %fs
 	UNWIND_ESPFIX_STACK
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	/*CFI_REGISTER es, ecx*/
-	movl PT_FS(%esp), %edi		# get the function address
+	GS_TO_REG %ecx
+	movl PT_GS(%esp), %edi		# get the function address
 	movl PT_ORIG_EAX(%esp), %edx	# get the error code
 	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
-	mov  %ecx, PT_FS(%esp)
-	/*CFI_REL_OFFSET fs, ES*/
+	REG_TO_PTGS %ecx
+	SET_KERNEL_GS %ecx
 	movl $(__USER_DS), %ecx
 	movl %ecx, %ds
 	movl %ecx, %es
@@ -1134,26 +1258,27 @@ END(page_fault)
  * by hand onto the new stack - while updating the return eip past
  * the instruction that would have done it for sysenter.
  */
-#define FIX_STACK(offset, ok, label) \
-	cmpw $__KERNEL_CS,4(%esp); \
-	jne ok; \
-label: \
-	movl TSS_sysenter_sp0+offset(%esp),%esp; \
-	CFI_DEF_CFA esp, 0; \
-	CFI_UNDEFINED eip; \
-	pushfl; \
-	CFI_ADJUST_CFA_OFFSET 4; \
-	pushl $__KERNEL_CS; \
-	CFI_ADJUST_CFA_OFFSET 4; \
-	pushl $sysenter_past_esp; \
-	CFI_ADJUST_CFA_OFFSET 4; \
+.macro FIX_STACK offset ok label
+	cmpw $__KERNEL_CS, 4(%esp)
+	jne \ok
+\label:
+	movl TSS_sysenter_sp0 + \offset(%esp), %esp
+	CFI_DEF_CFA esp, 0
+	CFI_UNDEFINED eip
+	pushfl
+	CFI_ADJUST_CFA_OFFSET 4
+	pushl $__KERNEL_CS
+	CFI_ADJUST_CFA_OFFSET 4
+	pushl $sysenter_past_esp
+	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET eip, 0
+.endm
 
 ENTRY(debug)
 	RING0_INT_FRAME
 	cmpl $ia32_sysenter_target,(%esp)
 	jne debug_stack_correct
-	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
+	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
 debug_stack_correct:
 	pushl $-1			# mark this as an int
 	CFI_ADJUST_CFA_OFFSET 4
@@ -1211,7 +1336,7 @@ nmi_stack_correct:
 
 nmi_stack_fixup:
 	RING0_INT_FRAME
-	FIX_STACK(12,nmi_stack_correct, 1)
+	FIX_STACK 12, nmi_stack_correct, 1
 	jmp nmi_stack_correct
 
 nmi_debug_stack_check:
@@ -1222,7 +1347,7 @@ nmi_debug_stack_check:
 	jb nmi_stack_correct
 	cmpl $debug_esp_fix_insn,(%esp)
 	ja nmi_stack_correct
-	FIX_STACK(24,nmi_stack_correct, 1)
+	FIX_STACK 24, nmi_stack_correct, 1
 	jmp nmi_stack_correct
 
 nmi_espfix_stack:
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index a1346217e43c..fbcf96b295ff 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -52,6 +52,7 @@
 #include <asm/irqflags.h>
 #include <asm/paravirt.h>
 #include <asm/ftrace.h>
+#include <asm/percpu.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -209,7 +210,7 @@ ENTRY(native_usergs_sysret64)
 
 	/* %rsp:at FRAMEEND */
 	.macro FIXUP_TOP_OF_STACK tmp offset=0
-	movq %gs:pda_oldrsp,\tmp
+	movq PER_CPU_VAR(old_rsp),\tmp
 	movq \tmp,RSP+\offset(%rsp)
 	movq $__USER_DS,SS+\offset(%rsp)
 	movq $__USER_CS,CS+\offset(%rsp)
@@ -220,7 +221,7 @@ ENTRY(native_usergs_sysret64)
 
 	.macro RESTORE_TOP_OF_STACK tmp offset=0
 	movq RSP+\offset(%rsp),\tmp
-	movq \tmp,%gs:pda_oldrsp
+	movq \tmp,PER_CPU_VAR(old_rsp)
 	movq EFLAGS+\offset(%rsp),\tmp
 	movq \tmp,R11+\offset(%rsp)
 	.endm
@@ -336,15 +337,15 @@ ENTRY(save_args)
 	je 1f
 	SWAPGS
 	/*
-	 * irqcount is used to check if a CPU is already on an interrupt stack
+	 * irq_count is used to check if a CPU is already on an interrupt stack
 	 * or not. While this is essentially redundant with preempt_count it is
 	 * a little cheaper to use a separate counter in the PDA (short of
 	 * moving irq_enter into assembly, which would be too much work)
 	 */
-1:	incl %gs:pda_irqcount
+1:	incl PER_CPU_VAR(irq_count)
 	jne 2f
 	popq_cfi %rax			/* move return address... */
-	mov %gs:pda_irqstackptr,%rsp
+	mov PER_CPU_VAR(irq_stack_ptr),%rsp
 	EMPTY_FRAME 0
 	pushq_cfi %rbp			/* backlink for unwinder */
 	pushq_cfi %rax			/* ... to the new stack */
@@ -409,6 +410,8 @@ END(save_paranoid)
 ENTRY(ret_from_fork)
 	DEFAULT_FRAME
 
+	LOCK ; btr $TIF_FORK,TI_flags(%r8)
+
 	push kernel_eflags(%rip)
 	CFI_ADJUST_CFA_OFFSET 8
 	popf					# reset kernel eflags
@@ -468,7 +471,7 @@ END(ret_from_fork)
 ENTRY(system_call)
 	CFI_STARTPROC	simple
 	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
+	CFI_DEF_CFA	rsp,KERNEL_STACK_OFFSET
 	CFI_REGISTER	rip,rcx
 	/*CFI_REGISTER	rflags,r11*/
 	SWAPGS_UNSAFE_STACK
@@ -479,8 +482,8 @@ ENTRY(system_call)
 	 */
 ENTRY(system_call_after_swapgs)
 
-	movq	%rsp,%gs:pda_oldrsp
-	movq	%gs:pda_kernelstack,%rsp
+	movq	%rsp,PER_CPU_VAR(old_rsp)
+	movq	PER_CPU_VAR(kernel_stack),%rsp
 	/*
 	 * No need to follow this irqs off/on section - it's straight
 	 * and short:
@@ -523,7 +526,7 @@ sysret_check:
 	CFI_REGISTER	rip,rcx
 	RESTORE_ARGS 0,-ARG_SKIP,1
 	/*CFI_REGISTER	rflags,r11*/
-	movq	%gs:pda_oldrsp, %rsp
+	movq	PER_CPU_VAR(old_rsp), %rsp
 	USERGS_SYSRET64
 
 	CFI_RESTORE_STATE
@@ -833,11 +836,11 @@ common_interrupt:
 	XCPT_FRAME
 	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
-	/* 0(%rsp): oldrsp-ARGOFFSET */
+	/* 0(%rsp): old_rsp-ARGOFFSET */
 ret_from_intr:
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	leaveq
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET	-8
@@ -982,8 +985,10 @@ apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
 	irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
 #endif
 
+#ifdef CONFIG_X86_UV
 apicinterrupt UV_BAU_MESSAGE \
 	uv_bau_message_intr1 uv_bau_message_interrupt
+#endif
 apicinterrupt LOCAL_TIMER_VECTOR \
 	apic_timer_interrupt smp_apic_timer_interrupt
 
@@ -1073,10 +1078,10 @@ ENTRY(\sym)
 	TRACE_IRQS_OFF
 	movq %rsp,%rdi		/* pt_regs pointer */
 	xorl %esi,%esi		/* no error code */
-	movq %gs:pda_data_offset, %rbp
-	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+	PER_CPU(init_tss, %rbp)
+	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
 	call \do_sym
-	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
 	jmp paranoid_exit	/* %ebx: no swapgs flag */
 	CFI_ENDPROC
 END(\sym)
@@ -1138,7 +1143,7 @@ ENTRY(native_load_gs_index)
 	CFI_STARTPROC
 	pushf
 	CFI_ADJUST_CFA_OFFSET 8
-	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
+	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
 	SWAPGS
 gs_change:
 	movl %edi,%gs
@@ -1260,14 +1265,14 @@ ENTRY(call_softirq)
 	CFI_REL_OFFSET rbp,0
 	mov  %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
-	incl %gs:pda_irqcount
-	cmove %gs:pda_irqstackptr,%rsp
+	incl PER_CPU_VAR(irq_count)
+	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
 	push  %rbp			# backlink for old unwinder
 	call __do_softirq
 	leaveq
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET	-8
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	ret
 	CFI_ENDPROC
 END(call_softirq)
@@ -1297,15 +1302,15 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
 	movq %rdi, %rsp		# we don't return, adjust the stack frame
 	CFI_ENDPROC
 	DEFAULT_FRAME
-11:	incl %gs:pda_irqcount
+11:	incl PER_CPU_VAR(irq_count)
 	movq %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
-	cmovzq %gs:pda_irqstackptr,%rsp
+	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
 	pushq %rbp			# backlink for old unwinder
 	call xen_evtchn_do_upcall
 	popq %rsp
 	CFI_DEF_CFA_REGISTER rsp
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	jmp  error_exit
 	CFI_ENDPROC
 END(do_hypervisor_callback)
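
Across the entry_64.S hunks, fields reached through the PDA pointer (%gs:pda_*) become free-standing DEFINE_PER_CPU() variables reached via PER_CPU_VAR(). A loose user-space analogue of that shift — thread-local variables instead of a shared state struct; purely illustrative, compile with -pthread:

#include <stdio.h>
#include <pthread.h>

/* old style: one struct, every field reached through a base pointer */
struct pda {
	int irqcount;
};

/* new style: a free-standing per-thread variable, like DEFINE_PER_CPU() */
static _Thread_local int irq_count = -1;

static void *worker(void *arg)
{
	irq_count++;		/* analogue of incl PER_CPU_VAR(irq_count) */
	printf("thread %ld irq_count=%d\n", (long)arg, irq_count);
	return NULL;
}

int main(void)
{
	pthread_t t[2];
	for (long i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}
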
diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c
index 53699c931ad4..55515d73d9c2 100644
--- a/arch/x86/kernel/es7000_32.c
+++ b/arch/x86/kernel/es7000_32.c
@@ -40,7 +40,6 @@
 #include <asm/smp.h>
 #include <asm/atomic.h>
 #include <asm/apicdef.h>
-#include <mach_mpparse.h>
 #include <asm/genapic.h>
 #include <asm/setup.h>
 
@@ -182,20 +181,16 @@ static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
 	return 0;
 }
 
-static void noop_wait_for_deassert(atomic_t *deassert_not_used)
-{
-}
-
 static int __init es7000_update_genapic(void)
 {
-	genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
+	apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
 
 	/* MPENTIUMIII */
 	if (boot_cpu_data.x86 == 6 &&
 	    (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) {
 		es7000_update_genapic_to_cluster();
-		genapic->wait_for_init_deassert = noop_wait_for_deassert;
-		genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
+		apic->wait_for_init_deassert = NULL;
+		apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
 	}
 
 	return 0;
@@ -292,24 +287,31 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
292{ 287{
293 struct acpi_table_header *header = NULL; 288 struct acpi_table_header *header = NULL;
294 int i = 0; 289 int i = 0;
290 acpi_size tbl_size;
295 291
296 while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) { 292 while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) {
297 if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) { 293 if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
298 struct oem_table *t = (struct oem_table *)header; 294 struct oem_table *t = (struct oem_table *)header;
299 295
300 oem_addrX = t->OEMTableAddr; 296 oem_addrX = t->OEMTableAddr;
301 oem_size = t->OEMTableSize; 297 oem_size = t->OEMTableSize;
298 early_acpi_os_unmap_memory(header, tbl_size);
302 299
303 *oem_addr = (unsigned long)__acpi_map_table(oem_addrX, 300 *oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
304 oem_size); 301 oem_size);
305 return 0; 302 return 0;
306 } 303 }
304 early_acpi_os_unmap_memory(header, tbl_size);
307 } 305 }
308 return -1; 306 return -1;
309} 307}
310 308
311void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) 309void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
312{ 310{
311 if (!oem_addr)
312 return;
313
314 __acpi_unmap_table((char *)oem_addr, oem_size);
313} 315}
314#endif 316#endif
315 317
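[note] The two hunks above fix an early-ACPI mapping leak: acpi_get_table_with_size() reports how much was mapped, so each successful lookup can be undone with early_acpi_os_unmap_memory(), and unmap_unisys_acpi_oem_table() now tolerates a NULL address. A hedged sketch of the pairing (flow simplified relative to the patch):

	#include <linux/acpi.h>
	#include <linux/string.h>

	static int __init scan_oem1_example(void)
	{
		struct acpi_table_header *header = NULL;
		acpi_size tbl_size;
		int i = 0;

		while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++,
							     &header, &tbl_size))) {
			int found = !memcmp(header->oem_id, "UNISYS", 6);

			/* every successful lookup gets unmapped again: */
			early_acpi_os_unmap_memory(header, tbl_size);
			if (found)
				return 0;
		}
		return -1;
	}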
@@ -359,20 +361,449 @@ es7000_mip_write(struct mip_reg *mip_reg)
359 return status; 361 return status;
360} 362}
361 363
362void __init 364void __init es7000_enable_apic_mode(void)
363es7000_sw_apic(void) 365{
364{ 366 struct mip_reg es7000_mip_reg;
365 if (es7000_plat) { 367 int mip_status;
366 int mip_status; 368
367 struct mip_reg es7000_mip_reg; 369 if (!es7000_plat)
368
369 printk("ES7000: Enabling APIC mode.\n");
370 memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
371 es7000_mip_reg.off_0 = MIP_SW_APIC;
372 es7000_mip_reg.off_38 = (MIP_VALID);
373 while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0)
374 printk("es7000_sw_apic: command failed, status = %x\n",
375 mip_status);
376 return; 370 return;
371
372 printk("ES7000: Enabling APIC mode.\n");
373 memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
374 es7000_mip_reg.off_0 = MIP_SW_APIC;
375 es7000_mip_reg.off_38 = MIP_VALID;
376
377 while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0) {
378 printk("es7000_enable_apic_mode: command failed, status = %x\n",
379 mip_status);
380 }
381}
382
383/*
384 * APIC driver for the Unisys ES7000 chipset.
385 */
386#define APIC_DEFINITION 1
387#include <linux/threads.h>
388#include <linux/cpumask.h>
389#include <asm/mpspec.h>
390#include <asm/genapic.h>
391#include <asm/fixmap.h>
392#include <asm/apicdef.h>
393#include <linux/kernel.h>
394#include <linux/string.h>
395#include <linux/init.h>
396#include <linux/acpi.h>
397#include <linux/smp.h>
398#include <asm/ipi.h>
399
400#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
401#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio)
402#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */
403
404#define APIC_DFR_VALUE (APIC_DFR_FLAT)
405
406extern void es7000_enable_apic_mode(void);
407extern int apic_version [MAX_APICS];
408extern u8 cpu_2_logical_apicid[];
409extern unsigned int boot_cpu_physical_apicid;
410
 411extern int parse_unisys_oem(char *oemptr);
412extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
413extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
414extern void setup_unisys(void);
415
416#define apicid_cluster(apicid) (apicid & 0xF0)
417#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
418
419static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
420{
421 /* Careful. Some cpus do not strictly honor the set of cpus
422 * specified in the interrupt destination when using lowest
423 * priority interrupt delivery mode.
424 *
425 * In particular there was a hyperthreading cpu observed to
426 * deliver interrupts to the wrong hyperthread when only one
 427	 * hyperthread was specified in the interrupt destination.
428 */
429 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
430}
431
432
433static void es7000_wait_for_init_deassert(atomic_t *deassert)
434{
435#ifndef CONFIG_ES7000_CLUSTERED_APIC
436 while (!atomic_read(deassert))
437 cpu_relax();
438#endif
439 return;
440}
441
442static unsigned int es7000_get_apic_id(unsigned long x)
443{
444 return (x >> 24) & 0xFF;
445}
446
447#ifdef CONFIG_ACPI
448static int es7000_check_dsdt(void)
449{
450 struct acpi_table_header header;
451
452 if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
453 !strncmp(header.oem_id, "UNISYS", 6))
454 return 1;
455 return 0;
456}
457#endif
458
459static void es7000_send_IPI_mask(const struct cpumask *mask, int vector)
460{
461 default_send_IPI_mask_sequence_phys(mask, vector);
462}
463
464static void es7000_send_IPI_allbutself(int vector)
465{
466 default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
467}
468
469static void es7000_send_IPI_all(int vector)
470{
471 es7000_send_IPI_mask(cpu_online_mask, vector);
472}
473
474static int es7000_apic_id_registered(void)
475{
476 return 1;
477}
478
479static const cpumask_t *target_cpus_cluster(void)
480{
481 return &CPU_MASK_ALL;
482}
483
484static const cpumask_t *es7000_target_cpus(void)
485{
486 return &cpumask_of_cpu(smp_processor_id());
487}
488
489static unsigned long
490es7000_check_apicid_used(physid_mask_t bitmap, int apicid)
491{
492 return 0;
493}
494static unsigned long es7000_check_apicid_present(int bit)
495{
496 return physid_isset(bit, phys_cpu_present_map);
497}
498
499static unsigned long calculate_ldr(int cpu)
500{
501 unsigned long id = xapic_phys_to_log_apicid(cpu);
502
503 return (SET_APIC_LOGICAL_ID(id));
504}
505
506/*
507 * Set up the logical destination ID.
508 *
 509	 * Intel recommends setting DFR, LDR and TPR before enabling
510 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
511 * document number 292116). So here it goes...
512 */
513static void es7000_init_apic_ldr_cluster(void)
514{
515 unsigned long val;
516 int cpu = smp_processor_id();
517
518 apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
519 val = calculate_ldr(cpu);
520 apic_write(APIC_LDR, val);
521}
522
523static void es7000_init_apic_ldr(void)
524{
525 unsigned long val;
526 int cpu = smp_processor_id();
527
528 apic_write(APIC_DFR, APIC_DFR_VALUE);
529 val = calculate_ldr(cpu);
530 apic_write(APIC_LDR, val);
531}
532
533static void es7000_setup_apic_routing(void)
534{
535 int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
536 printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
537 (apic_version[apic] == 0x14) ?
538 "Physical Cluster" : "Logical Cluster",
539 nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
540}
541
542static int es7000_apicid_to_node(int logical_apicid)
543{
544 return 0;
545}
546
547
548static int es7000_cpu_present_to_apicid(int mps_cpu)
549{
550 if (!mps_cpu)
551 return boot_cpu_physical_apicid;
552 else if (mps_cpu < nr_cpu_ids)
553 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
554 else
555 return BAD_APICID;
556}
557
558static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid)
559{
560 static int id = 0;
561 physid_mask_t mask;
562
563 mask = physid_mask_of_physid(id);
564 ++id;
565
566 return mask;
567}
568
569/* Mapping from cpu number to logical apicid */
570static int es7000_cpu_to_logical_apicid(int cpu)
571{
572#ifdef CONFIG_SMP
573 if (cpu >= nr_cpu_ids)
574 return BAD_APICID;
575 return (int)cpu_2_logical_apicid[cpu];
576#else
577 return logical_smp_processor_id();
578#endif
579}
580
581static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map)
582{
583 /* For clustered we don't have a good way to do this yet - hack */
584 return physids_promote(0xff);
585}
586
587static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
588{
589 boot_cpu_physical_apicid = read_apic_id();
590 return (1);
591}
592
593static unsigned int
594es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
595{
596 int cpus_found = 0;
597 int num_bits_set;
598 int apicid;
599 int cpu;
600
601 num_bits_set = cpumask_weight(cpumask);
602 /* Return id to all */
603 if (num_bits_set == nr_cpu_ids)
604 return 0xFF;
605 /*
 606	 * The cpus in the mask must all be in the same APIC cluster. If they
 607	 * are not on the same apicid cluster, return the default value of target_cpus():
608 */
609 cpu = cpumask_first(cpumask);
610 apicid = es7000_cpu_to_logical_apicid(cpu);
611
612 while (cpus_found < num_bits_set) {
613 if (cpumask_test_cpu(cpu, cpumask)) {
614 int new_apicid = es7000_cpu_to_logical_apicid(cpu);
615
616 if (apicid_cluster(apicid) !=
617 apicid_cluster(new_apicid)) {
 618	 printk("%s: Not a valid mask!\n", __func__);
619
620 return 0xFF;
621 }
622 apicid = new_apicid;
623 cpus_found++;
624 }
625 cpu++;
626 }
627 return apicid;
628}
629
630static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
631{
632 int cpus_found = 0;
633 int num_bits_set;
634 int apicid;
635 int cpu;
636
637 num_bits_set = cpus_weight(*cpumask);
638 /* Return id to all */
639 if (num_bits_set == nr_cpu_ids)
640 return es7000_cpu_to_logical_apicid(0);
641 /*
 642	 * The cpus in the mask must all be in the same APIC cluster. If they
 643	 * are not on the same apicid cluster, return the default value of target_cpus():
644 */
645 cpu = first_cpu(*cpumask);
646 apicid = es7000_cpu_to_logical_apicid(cpu);
647 while (cpus_found < num_bits_set) {
648 if (cpu_isset(cpu, *cpumask)) {
649 int new_apicid = es7000_cpu_to_logical_apicid(cpu);
650
651 if (apicid_cluster(apicid) !=
652 apicid_cluster(new_apicid)) {
 653	 printk("%s: Not a valid mask!\n", __func__);
654
655 return es7000_cpu_to_logical_apicid(0);
656 }
657 apicid = new_apicid;
658 cpus_found++;
659 }
660 cpu++;
661 }
662 return apicid;
663}
664
665static unsigned int
666es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
667 const struct cpumask *andmask)
668{
669 int apicid = es7000_cpu_to_logical_apicid(0);
670 cpumask_var_t cpumask;
671
672 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
673 return apicid;
674
675 cpumask_and(cpumask, inmask, andmask);
676 cpumask_and(cpumask, cpumask, cpu_online_mask);
677 apicid = es7000_cpu_mask_to_apicid(cpumask);
678
679 free_cpumask_var(cpumask);
680
681 return apicid;
682}
683
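[note] es7000_cpu_mask_to_apicid_and() above uses the cpumask_var_t idiom: with CONFIG_CPUMASK_OFFSTACK a cpumask may be too large for the stack, so temporaries are explicitly allocated (GFP_ATOMIC, since this can run with interrupts off) and freed. A minimal sketch of the pattern; the helper name is hypothetical:

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	/* first online CPU in (a AND b), via an off-stack temporary */
	static int first_online_in_and_example(const struct cpumask *a,
					       const struct cpumask *b)
	{
		cpumask_var_t tmp;
		int cpu = nr_cpu_ids;

		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
			return cpu;		/* graceful fallback, as above */

		cpumask_and(tmp, a, b);
		cpu = cpumask_first_and(tmp, cpu_online_mask);

		free_cpumask_var(tmp);
		return cpu;
	}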
684static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
685{
686 return cpuid_apic >> index_msb;
687}
688
689void __init es7000_update_genapic_to_cluster(void)
690{
691 apic->target_cpus = target_cpus_cluster;
692 apic->irq_delivery_mode = INT_DELIVERY_MODE_CLUSTER;
693 apic->irq_dest_mode = INT_DEST_MODE_CLUSTER;
694
695 apic->init_apic_ldr = es7000_init_apic_ldr_cluster;
696
697 apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster;
698}
699
700static int probe_es7000(void)
701{
702 /* probed later in mptable/ACPI hooks */
703 return 0;
704}
705
706static __init int
707es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
708{
709 if (mpc->oemptr) {
710 struct mpc_oemtable *oem_table =
711 (struct mpc_oemtable *)mpc->oemptr;
712
713 if (!strncmp(oem, "UNISYS", 6))
714 return parse_unisys_oem((char *)oem_table);
715 }
716 return 0;
717}
718
719#ifdef CONFIG_ACPI
720/* Hook from generic ACPI tables.c */
721static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
722{
723 unsigned long oem_addr = 0;
724 int check_dsdt;
725 int ret = 0;
726
 727	 /* check the DSDT first, to avoid clearing the fixmap for oem_addr */
728 check_dsdt = es7000_check_dsdt();
729
730 if (!find_unisys_acpi_oem_table(&oem_addr)) {
731 if (check_dsdt)
732 ret = parse_unisys_oem((char *)oem_addr);
733 else {
734 setup_unisys();
735 ret = 1;
736 }
737 /*
738 * we need to unmap it
739 */
740 unmap_unisys_acpi_oem_table(oem_addr);
377 } 741 }
742 return ret;
743}
744#else
745static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
746{
747 return 0;
378} 748}
749#endif
750
751
752struct genapic apic_es7000 = {
753
754 .name = "es7000",
755 .probe = probe_es7000,
756 .acpi_madt_oem_check = es7000_acpi_madt_oem_check,
757 .apic_id_registered = es7000_apic_id_registered,
758
759 .irq_delivery_mode = dest_Fixed,
760 /* phys delivery to target CPUs: */
761 .irq_dest_mode = 0,
762
763 .target_cpus = es7000_target_cpus,
764 .disable_esr = 1,
765 .dest_logical = 0,
766 .check_apicid_used = es7000_check_apicid_used,
767 .check_apicid_present = es7000_check_apicid_present,
768
769 .vector_allocation_domain = es7000_vector_allocation_domain,
770 .init_apic_ldr = es7000_init_apic_ldr,
771
772 .ioapic_phys_id_map = es7000_ioapic_phys_id_map,
773 .setup_apic_routing = es7000_setup_apic_routing,
774 .multi_timer_check = NULL,
775 .apicid_to_node = es7000_apicid_to_node,
776 .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
777 .cpu_present_to_apicid = es7000_cpu_present_to_apicid,
778 .apicid_to_cpu_present = es7000_apicid_to_cpu_present,
779 .setup_portio_remap = NULL,
780 .check_phys_apicid_present = es7000_check_phys_apicid_present,
781 .enable_apic_mode = es7000_enable_apic_mode,
782 .phys_pkg_id = es7000_phys_pkg_id,
783 .mps_oem_check = es7000_mps_oem_check,
784
785 .get_apic_id = es7000_get_apic_id,
786 .set_apic_id = NULL,
787 .apic_id_mask = 0xFF << 24,
788
789 .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
790 .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
791
792 .send_IPI_mask = es7000_send_IPI_mask,
793 .send_IPI_mask_allbutself = NULL,
794 .send_IPI_allbutself = es7000_send_IPI_allbutself,
795 .send_IPI_all = es7000_send_IPI_all,
796 .send_IPI_self = default_send_IPI_self,
797
798 .wakeup_cpu = NULL,
799
800 .trampoline_phys_low = 0x467,
801 .trampoline_phys_high = 0x469,
802
803 .wait_for_init_deassert = es7000_wait_for_init_deassert,
804
805 /* Nothing to do for most platforms, since cleared by the INIT cycle: */
806 .smp_callin_clear_local_apic = NULL,
807 .store_NMI_vector = NULL,
808 .inquire_remote_apic = default_inquire_remote_apic,
809};
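[note] The apic_es7000 initializer above is the template every subarch driver now follows: one fully spelled-out struct genapic, with NULL for hooks the platform does not need (callers NULL-check them, which is why the quirk earlier can set wait_for_init_deassert = NULL instead of installing a no-op). A hedged skeleton; the myplat_* names are hypothetical placeholders, not kernel symbols:

	static int myplat_probe(void)
	{
		return 0;	/* selected later via MPS/ACPI hooks */
	}

	struct genapic apic_myplat = {
		.name			= "myplat",
		.probe			= myplat_probe,
		.irq_delivery_mode	= dest_Fixed,
		.irq_dest_mode		= 0,		/* physical */

		/* unused hooks stay NULL: */
		.wakeup_cpu		= NULL,
		.wait_for_init_deassert	= NULL,
		.inquire_remote_apic	= NULL,
	};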
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 2bced78b0b8e..820dea5d0ebe 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -29,10 +29,12 @@ extern struct genapic apic_x2xpic_uv_x;
29extern struct genapic apic_x2apic_phys; 29extern struct genapic apic_x2apic_phys;
30extern struct genapic apic_x2apic_cluster; 30extern struct genapic apic_x2apic_cluster;
31 31
32struct genapic __read_mostly *genapic = &apic_flat; 32struct genapic __read_mostly *apic = &apic_flat;
33 33
34static struct genapic *apic_probe[] __initdata = { 34static struct genapic *apic_probe[] __initdata = {
35#ifdef CONFIG_X86_UV
35 &apic_x2apic_uv_x, 36 &apic_x2apic_uv_x,
37#endif
36 &apic_x2apic_phys, 38 &apic_x2apic_phys,
37 &apic_x2apic_cluster, 39 &apic_x2apic_cluster,
38 &apic_physflat, 40 &apic_physflat,
@@ -42,17 +44,17 @@ static struct genapic *apic_probe[] __initdata = {
42/* 44/*
43 * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. 45 * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
44 */ 46 */
45void __init setup_apic_routing(void) 47void __init default_setup_apic_routing(void)
46{ 48{
47 if (genapic == &apic_x2apic_phys || genapic == &apic_x2apic_cluster) { 49 if (apic == &apic_x2apic_phys || apic == &apic_x2apic_cluster) {
48 if (!intr_remapping_enabled) 50 if (!intr_remapping_enabled)
49 genapic = &apic_flat; 51 apic = &apic_flat;
50 } 52 }
51 53
52 if (genapic == &apic_flat) { 54 if (apic == &apic_flat) {
53 if (max_physical_apicid >= 8) 55 if (max_physical_apicid >= 8)
54 genapic = &apic_physflat; 56 apic = &apic_physflat;
55 printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name); 57 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
56 } 58 }
57 59
58 if (x86_quirks->update_genapic) 60 if (x86_quirks->update_genapic)
@@ -63,18 +65,18 @@ void __init setup_apic_routing(void)
63 65
64void apic_send_IPI_self(int vector) 66void apic_send_IPI_self(int vector)
65{ 67{
66 __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); 68 __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
67} 69}
68 70
69int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) 71int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
70{ 72{
71 int i; 73 int i;
72 74
73 for (i = 0; apic_probe[i]; ++i) { 75 for (i = 0; apic_probe[i]; ++i) {
74 if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { 76 if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
75 genapic = apic_probe[i]; 77 apic = apic_probe[i];
76 printk(KERN_INFO "Setting APIC routing to %s.\n", 78 printk(KERN_INFO "Setting APIC routing to %s.\n",
77 genapic->name); 79 apic->name);
78 return 1; 80 return 1;
79 } 81 }
80 } 82 }
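[note] With the genapic pointer renamed to apic, every call site funnels through one structure, which is also what lets quirks such as the es7000 one above swap methods at runtime. An illustrative caller (function name hypothetical):

	static void kick_cpus_example(const struct cpumask *mask, int vector)
	{
		/* was: genapic->send_IPI_mask(mask, vector); */
		apic->send_IPI_mask(mask, vector);
	}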
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index 34185488e4fb..249d2d3c034c 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -19,7 +19,6 @@
19#include <asm/smp.h> 19#include <asm/smp.h>
20#include <asm/ipi.h> 20#include <asm/ipi.h>
21#include <asm/genapic.h> 21#include <asm/genapic.h>
22#include <mach_apicdef.h>
23 22
24#ifdef CONFIG_ACPI 23#ifdef CONFIG_ACPI
25#include <acpi/acpi_bus.h> 24#include <acpi/acpi_bus.h>
@@ -74,7 +73,7 @@ static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
74 unsigned long flags; 73 unsigned long flags;
75 74
76 local_irq_save(flags); 75 local_irq_save(flags);
77 __send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL); 76 __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
78 local_irq_restore(flags); 77 local_irq_restore(flags);
79} 78}
80 79
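[note] _flat_send_IPI_mask() above works because flat logical mode carries one destination bit per CPU in an 8-bit field; that is also why default_setup_apic_routing() earlier falls back to physflat once max_physical_apicid >= 8. A sketch of the extraction (the explicit 0xFF clamp is an assumption for illustration; the real code relies on flat mode never seeing more than 8 CPUs):

	#include <linux/cpumask.h>

	/* flat mode destination = low byte of the cpumask's first word */
	static unsigned long flat_dest_bits_example(const struct cpumask *cpumask)
	{
		return cpumask_bits(cpumask)[0] & 0xFFul;
	}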
@@ -85,14 +84,15 @@ static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
85 _flat_send_IPI_mask(mask, vector); 84 _flat_send_IPI_mask(mask, vector);
86} 85}
87 86
88static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, 87static void
89 int vector) 88 flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
90{ 89{
91 unsigned long mask = cpumask_bits(cpumask)[0]; 90 unsigned long mask = cpumask_bits(cpumask)[0];
92 int cpu = smp_processor_id(); 91 int cpu = smp_processor_id();
93 92
94 if (cpu < BITS_PER_LONG) 93 if (cpu < BITS_PER_LONG)
95 clear_bit(cpu, &mask); 94 clear_bit(cpu, &mask);
95
96 _flat_send_IPI_mask(mask, vector); 96 _flat_send_IPI_mask(mask, vector);
97} 97}
98 98
@@ -114,23 +114,27 @@ static void flat_send_IPI_allbutself(int vector)
114 _flat_send_IPI_mask(mask, vector); 114 _flat_send_IPI_mask(mask, vector);
115 } 115 }
116 } else if (num_online_cpus() > 1) { 116 } else if (num_online_cpus() > 1) {
117 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL); 117 __default_send_IPI_shortcut(APIC_DEST_ALLBUT,
118 vector, apic->dest_logical);
118 } 119 }
119} 120}
120 121
121static void flat_send_IPI_all(int vector) 122static void flat_send_IPI_all(int vector)
122{ 123{
123 if (vector == NMI_VECTOR) 124 if (vector == NMI_VECTOR) {
124 flat_send_IPI_mask(cpu_online_mask, vector); 125 flat_send_IPI_mask(cpu_online_mask, vector);
125 else 126 } else {
126 __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); 127 __default_send_IPI_shortcut(APIC_DEST_ALLINC,
128 vector, apic->dest_logical);
129 }
127} 130}
128 131
129static unsigned int get_apic_id(unsigned long x) 132static unsigned int flat_get_apic_id(unsigned long x)
130{ 133{
131 unsigned int id; 134 unsigned int id;
132 135
133 id = (((x)>>24) & 0xFFu); 136 id = (((x)>>24) & 0xFFu);
137
134 return id; 138 return id;
135} 139}
136 140
@@ -146,7 +150,7 @@ static unsigned int read_xapic_id(void)
146{ 150{
147 unsigned int id; 151 unsigned int id;
148 152
149 id = get_apic_id(apic_read(APIC_ID)); 153 id = flat_get_apic_id(apic_read(APIC_ID));
150 return id; 154 return id;
151} 155}
152 156
@@ -169,31 +173,62 @@ static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
169 return mask1 & mask2; 173 return mask1 & mask2;
170} 174}
171 175
172static unsigned int phys_pkg_id(int index_msb) 176static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
173{ 177{
174 return hard_smp_processor_id() >> index_msb; 178 return hard_smp_processor_id() >> index_msb;
175} 179}
176 180
177struct genapic apic_flat = { 181struct genapic apic_flat = {
178 .name = "flat", 182 .name = "flat",
179 .acpi_madt_oem_check = flat_acpi_madt_oem_check, 183 .probe = NULL,
180 .int_delivery_mode = dest_LowestPrio, 184 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
181 .int_dest_mode = (APIC_DEST_LOGICAL != 0), 185 .apic_id_registered = flat_apic_id_registered,
182 .target_cpus = flat_target_cpus, 186
183 .vector_allocation_domain = flat_vector_allocation_domain, 187 .irq_delivery_mode = dest_LowestPrio,
184 .apic_id_registered = flat_apic_id_registered, 188 .irq_dest_mode = 1, /* logical */
185 .init_apic_ldr = flat_init_apic_ldr, 189
186 .send_IPI_all = flat_send_IPI_all, 190 .target_cpus = flat_target_cpus,
187 .send_IPI_allbutself = flat_send_IPI_allbutself, 191 .disable_esr = 0,
188 .send_IPI_mask = flat_send_IPI_mask, 192 .dest_logical = APIC_DEST_LOGICAL,
189 .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, 193 .check_apicid_used = NULL,
190 .send_IPI_self = apic_send_IPI_self, 194 .check_apicid_present = NULL,
191 .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, 195
192 .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and, 196 .vector_allocation_domain = flat_vector_allocation_domain,
193 .phys_pkg_id = phys_pkg_id, 197 .init_apic_ldr = flat_init_apic_ldr,
194 .get_apic_id = get_apic_id, 198
195 .set_apic_id = set_apic_id, 199 .ioapic_phys_id_map = NULL,
196 .apic_id_mask = (0xFFu<<24), 200 .setup_apic_routing = NULL,
201 .multi_timer_check = NULL,
202 .apicid_to_node = NULL,
203 .cpu_to_logical_apicid = NULL,
204 .cpu_present_to_apicid = default_cpu_present_to_apicid,
205 .apicid_to_cpu_present = NULL,
206 .setup_portio_remap = NULL,
207 .check_phys_apicid_present = default_check_phys_apicid_present,
208 .enable_apic_mode = NULL,
209 .phys_pkg_id = flat_phys_pkg_id,
210 .mps_oem_check = NULL,
211
212 .get_apic_id = flat_get_apic_id,
213 .set_apic_id = set_apic_id,
214 .apic_id_mask = 0xFFu << 24,
215
216 .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
217 .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
218
219 .send_IPI_mask = flat_send_IPI_mask,
220 .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
221 .send_IPI_allbutself = flat_send_IPI_allbutself,
222 .send_IPI_all = flat_send_IPI_all,
223 .send_IPI_self = apic_send_IPI_self,
224
225 .wakeup_cpu = NULL,
226 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
227 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
228 .wait_for_init_deassert = NULL,
229 .smp_callin_clear_local_apic = NULL,
230 .store_NMI_vector = NULL,
231 .inquire_remote_apic = NULL,
197}; 232};
198 233
199/* 234/*
@@ -232,18 +267,18 @@ static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
232 267
233static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector) 268static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
234{ 269{
235 send_IPI_mask_sequence(cpumask, vector); 270 default_send_IPI_mask_sequence_phys(cpumask, vector);
236} 271}
237 272
238static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask, 273static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
239 int vector) 274 int vector)
240{ 275{
241 send_IPI_mask_allbutself(cpumask, vector); 276 default_send_IPI_mask_allbutself_phys(cpumask, vector);
242} 277}
243 278
244static void physflat_send_IPI_allbutself(int vector) 279static void physflat_send_IPI_allbutself(int vector)
245{ 280{
246 send_IPI_mask_allbutself(cpu_online_mask, vector); 281 default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
247} 282}
248 283
249static void physflat_send_IPI_all(int vector) 284static void physflat_send_IPI_all(int vector)
@@ -276,32 +311,67 @@ physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
276 * We're using fixed IRQ delivery, can only return one phys APIC ID. 311 * We're using fixed IRQ delivery, can only return one phys APIC ID.
277 * May as well be the first. 312 * May as well be the first.
278 */ 313 */
279 for_each_cpu_and(cpu, cpumask, andmask) 314 for_each_cpu_and(cpu, cpumask, andmask) {
280 if (cpumask_test_cpu(cpu, cpu_online_mask)) 315 if (cpumask_test_cpu(cpu, cpu_online_mask))
281 break; 316 break;
317 }
282 if (cpu < nr_cpu_ids) 318 if (cpu < nr_cpu_ids)
283 return per_cpu(x86_cpu_to_apicid, cpu); 319 return per_cpu(x86_cpu_to_apicid, cpu);
320
284 return BAD_APICID; 321 return BAD_APICID;
285} 322}
286 323
287struct genapic apic_physflat = { 324struct genapic apic_physflat = {
288 .name = "physical flat", 325
289 .acpi_madt_oem_check = physflat_acpi_madt_oem_check, 326 .name = "physical flat",
290 .int_delivery_mode = dest_Fixed, 327 .probe = NULL,
291 .int_dest_mode = (APIC_DEST_PHYSICAL != 0), 328 .acpi_madt_oem_check = physflat_acpi_madt_oem_check,
292 .target_cpus = physflat_target_cpus, 329 .apic_id_registered = flat_apic_id_registered,
293 .vector_allocation_domain = physflat_vector_allocation_domain, 330
294 .apic_id_registered = flat_apic_id_registered, 331 .irq_delivery_mode = dest_Fixed,
295 .init_apic_ldr = flat_init_apic_ldr,/*not needed, but shouldn't hurt*/ 332 .irq_dest_mode = 0, /* physical */
296 .send_IPI_all = physflat_send_IPI_all, 333
297 .send_IPI_allbutself = physflat_send_IPI_allbutself, 334 .target_cpus = physflat_target_cpus,
298 .send_IPI_mask = physflat_send_IPI_mask, 335 .disable_esr = 0,
299 .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself, 336 .dest_logical = 0,
300 .send_IPI_self = apic_send_IPI_self, 337 .check_apicid_used = NULL,
301 .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, 338 .check_apicid_present = NULL,
302 .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and, 339
303 .phys_pkg_id = phys_pkg_id, 340 .vector_allocation_domain = physflat_vector_allocation_domain,
304 .get_apic_id = get_apic_id, 341 /* not needed, but shouldn't hurt: */
305 .set_apic_id = set_apic_id, 342 .init_apic_ldr = flat_init_apic_ldr,
306 .apic_id_mask = (0xFFu<<24), 343
344 .ioapic_phys_id_map = NULL,
345 .setup_apic_routing = NULL,
346 .multi_timer_check = NULL,
347 .apicid_to_node = NULL,
348 .cpu_to_logical_apicid = NULL,
349 .cpu_present_to_apicid = default_cpu_present_to_apicid,
350 .apicid_to_cpu_present = NULL,
351 .setup_portio_remap = NULL,
352 .check_phys_apicid_present = default_check_phys_apicid_present,
353 .enable_apic_mode = NULL,
354 .phys_pkg_id = flat_phys_pkg_id,
355 .mps_oem_check = NULL,
356
357 .get_apic_id = flat_get_apic_id,
358 .set_apic_id = set_apic_id,
359 .apic_id_mask = 0xFFu << 24,
360
361 .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
362 .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
363
364 .send_IPI_mask = physflat_send_IPI_mask,
365 .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
366 .send_IPI_allbutself = physflat_send_IPI_allbutself,
367 .send_IPI_all = physflat_send_IPI_all,
368 .send_IPI_self = apic_send_IPI_self,
369
370 .wakeup_cpu = NULL,
371 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
372 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
373 .wait_for_init_deassert = NULL,
374 .smp_callin_clear_local_apic = NULL,
375 .store_NMI_vector = NULL,
376 .inquire_remote_apic = NULL,
307}; 377};
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index 6ce497cc372d..7c87156b6411 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -36,8 +36,8 @@ static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
36 cpumask_set_cpu(cpu, retmask); 36 cpumask_set_cpu(cpu, retmask);
37} 37}
38 38
39static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, 39static void
40 unsigned int dest) 40 __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
41{ 41{
42 unsigned long cfg; 42 unsigned long cfg;
43 43
@@ -57,45 +57,50 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
57 */ 57 */
58static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) 58static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
59{ 59{
60 unsigned long flags;
61 unsigned long query_cpu; 60 unsigned long query_cpu;
61 unsigned long flags;
62 62
63 local_irq_save(flags); 63 local_irq_save(flags);
64 for_each_cpu(query_cpu, mask) 64 for_each_cpu(query_cpu, mask) {
65 __x2apic_send_IPI_dest( 65 __x2apic_send_IPI_dest(
66 per_cpu(x86_cpu_to_logical_apicid, query_cpu), 66 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
67 vector, APIC_DEST_LOGICAL); 67 vector, apic->dest_logical);
68 }
68 local_irq_restore(flags); 69 local_irq_restore(flags);
69} 70}
70 71
71static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, 72static void
72 int vector) 73 x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
73{ 74{
74 unsigned long flags;
75 unsigned long query_cpu;
76 unsigned long this_cpu = smp_processor_id(); 75 unsigned long this_cpu = smp_processor_id();
76 unsigned long query_cpu;
77 unsigned long flags;
77 78
78 local_irq_save(flags); 79 local_irq_save(flags);
79 for_each_cpu(query_cpu, mask) 80 for_each_cpu(query_cpu, mask) {
80 if (query_cpu != this_cpu) 81 if (query_cpu == this_cpu)
81 __x2apic_send_IPI_dest( 82 continue;
83 __x2apic_send_IPI_dest(
82 per_cpu(x86_cpu_to_logical_apicid, query_cpu), 84 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
83 vector, APIC_DEST_LOGICAL); 85 vector, apic->dest_logical);
86 }
84 local_irq_restore(flags); 87 local_irq_restore(flags);
85} 88}
86 89
87static void x2apic_send_IPI_allbutself(int vector) 90static void x2apic_send_IPI_allbutself(int vector)
88{ 91{
89 unsigned long flags;
90 unsigned long query_cpu;
91 unsigned long this_cpu = smp_processor_id(); 92 unsigned long this_cpu = smp_processor_id();
93 unsigned long query_cpu;
94 unsigned long flags;
92 95
93 local_irq_save(flags); 96 local_irq_save(flags);
94 for_each_online_cpu(query_cpu) 97 for_each_online_cpu(query_cpu) {
95 if (query_cpu != this_cpu) 98 if (query_cpu == this_cpu)
96 __x2apic_send_IPI_dest( 99 continue;
100 __x2apic_send_IPI_dest(
97 per_cpu(x86_cpu_to_logical_apicid, query_cpu), 101 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
98 vector, APIC_DEST_LOGICAL); 102 vector, apic->dest_logical);
103 }
99 local_irq_restore(flags); 104 local_irq_restore(flags);
100} 105}
101 106
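[note] All three loops above funnel into __x2apic_send_IPI_dest(), which in x2APIC mode is a single 64-bit ICR MSR write: destination APIC ID in the upper 32 bits, destination mode and vector in the lower. A hedged sketch (0x830 is the architectural x2APIC ICR MSR; treat the exact field composition as illustrative):

	#include <linux/types.h>
	#include <asm/msr.h>

	#define X2APIC_ICR_MSR_EXAMPLE	0x830

	static void x2apic_icr_write_example(u32 apicid, u32 vector, u32 dest_mode)
	{
		u64 icr = ((u64)apicid << 32) | dest_mode | vector;

		wrmsrl(X2APIC_ICR_MSR_EXAMPLE, icr);
	}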
@@ -111,21 +116,21 @@ static int x2apic_apic_id_registered(void)
111 116
112static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) 117static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
113{ 118{
114 int cpu;
115
116 /* 119 /*
117 * We're using fixed IRQ delivery, can only return one logical APIC ID. 120 * We're using fixed IRQ delivery, can only return one logical APIC ID.
118 * May as well be the first. 121 * May as well be the first.
119 */ 122 */
120 cpu = cpumask_first(cpumask); 123 int cpu = cpumask_first(cpumask);
124
121 if ((unsigned)cpu < nr_cpu_ids) 125 if ((unsigned)cpu < nr_cpu_ids)
122 return per_cpu(x86_cpu_to_logical_apicid, cpu); 126 return per_cpu(x86_cpu_to_logical_apicid, cpu);
123 else 127 else
124 return BAD_APICID; 128 return BAD_APICID;
125} 129}
126 130
127static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 131static unsigned int
128 const struct cpumask *andmask) 132x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
133 const struct cpumask *andmask)
129{ 134{
130 int cpu; 135 int cpu;
131 136
@@ -133,15 +138,18 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
133 * We're using fixed IRQ delivery, can only return one logical APIC ID. 138 * We're using fixed IRQ delivery, can only return one logical APIC ID.
134 * May as well be the first. 139 * May as well be the first.
135 */ 140 */
136 for_each_cpu_and(cpu, cpumask, andmask) 141 for_each_cpu_and(cpu, cpumask, andmask) {
137 if (cpumask_test_cpu(cpu, cpu_online_mask)) 142 if (cpumask_test_cpu(cpu, cpu_online_mask))
138 break; 143 break;
144 }
145
139 if (cpu < nr_cpu_ids) 146 if (cpu < nr_cpu_ids)
140 return per_cpu(x86_cpu_to_logical_apicid, cpu); 147 return per_cpu(x86_cpu_to_logical_apicid, cpu);
148
141 return BAD_APICID; 149 return BAD_APICID;
142} 150}
143 151
144static unsigned int get_apic_id(unsigned long x) 152static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
145{ 153{
146 unsigned int id; 154 unsigned int id;
147 155
@@ -157,7 +165,7 @@ static unsigned long set_apic_id(unsigned int id)
157 return x; 165 return x;
158} 166}
159 167
160static unsigned int phys_pkg_id(int index_msb) 168static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
161{ 169{
162 return current_cpu_data.initial_apicid >> index_msb; 170 return current_cpu_data.initial_apicid >> index_msb;
163} 171}
@@ -172,27 +180,58 @@ static void init_x2apic_ldr(void)
172 int cpu = smp_processor_id(); 180 int cpu = smp_processor_id();
173 181
174 per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR); 182 per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
175 return;
176} 183}
177 184
178struct genapic apic_x2apic_cluster = { 185struct genapic apic_x2apic_cluster = {
179 .name = "cluster x2apic", 186
180 .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, 187 .name = "cluster x2apic",
181 .int_delivery_mode = dest_LowestPrio, 188 .probe = NULL,
182 .int_dest_mode = (APIC_DEST_LOGICAL != 0), 189 .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
183 .target_cpus = x2apic_target_cpus, 190 .apic_id_registered = x2apic_apic_id_registered,
184 .vector_allocation_domain = x2apic_vector_allocation_domain, 191
185 .apic_id_registered = x2apic_apic_id_registered, 192 .irq_delivery_mode = dest_LowestPrio,
186 .init_apic_ldr = init_x2apic_ldr, 193 .irq_dest_mode = 1, /* logical */
187 .send_IPI_all = x2apic_send_IPI_all, 194
188 .send_IPI_allbutself = x2apic_send_IPI_allbutself, 195 .target_cpus = x2apic_target_cpus,
189 .send_IPI_mask = x2apic_send_IPI_mask, 196 .disable_esr = 0,
190 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, 197 .dest_logical = APIC_DEST_LOGICAL,
191 .send_IPI_self = x2apic_send_IPI_self, 198 .check_apicid_used = NULL,
192 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, 199 .check_apicid_present = NULL,
193 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, 200
194 .phys_pkg_id = phys_pkg_id, 201 .vector_allocation_domain = x2apic_vector_allocation_domain,
195 .get_apic_id = get_apic_id, 202 .init_apic_ldr = init_x2apic_ldr,
196 .set_apic_id = set_apic_id, 203
197 .apic_id_mask = (0xFFFFFFFFu), 204 .ioapic_phys_id_map = NULL,
205 .setup_apic_routing = NULL,
206 .multi_timer_check = NULL,
207 .apicid_to_node = NULL,
208 .cpu_to_logical_apicid = NULL,
209 .cpu_present_to_apicid = default_cpu_present_to_apicid,
210 .apicid_to_cpu_present = NULL,
211 .setup_portio_remap = NULL,
212 .check_phys_apicid_present = default_check_phys_apicid_present,
213 .enable_apic_mode = NULL,
214 .phys_pkg_id = x2apic_cluster_phys_pkg_id,
215 .mps_oem_check = NULL,
216
217 .get_apic_id = x2apic_cluster_phys_get_apic_id,
218 .set_apic_id = set_apic_id,
219 .apic_id_mask = 0xFFFFFFFFu,
220
221 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
222 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
223
224 .send_IPI_mask = x2apic_send_IPI_mask,
225 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
226 .send_IPI_allbutself = x2apic_send_IPI_allbutself,
227 .send_IPI_all = x2apic_send_IPI_all,
228 .send_IPI_self = x2apic_send_IPI_self,
229
230 .wakeup_cpu = NULL,
231 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
232 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
233 .wait_for_init_deassert = NULL,
234 .smp_callin_clear_local_apic = NULL,
235 .store_NMI_vector = NULL,
236 .inquire_remote_apic = NULL,
198}; 237};
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c
index 21bcc0e098ba..5cbae8aa0408 100644
--- a/arch/x86/kernel/genx2apic_phys.c
+++ b/arch/x86/kernel/genx2apic_phys.c
@@ -55,8 +55,8 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
55 55
56static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) 56static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
57{ 57{
58 unsigned long flags;
59 unsigned long query_cpu; 58 unsigned long query_cpu;
59 unsigned long flags;
60 60
61 local_irq_save(flags); 61 local_irq_save(flags);
62 for_each_cpu(query_cpu, mask) { 62 for_each_cpu(query_cpu, mask) {
@@ -66,12 +66,12 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
66 local_irq_restore(flags); 66 local_irq_restore(flags);
67} 67}
68 68
69static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, 69static void
70 int vector) 70 x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
71{ 71{
72 unsigned long flags;
73 unsigned long query_cpu;
74 unsigned long this_cpu = smp_processor_id(); 72 unsigned long this_cpu = smp_processor_id();
73 unsigned long query_cpu;
74 unsigned long flags;
75 75
76 local_irq_save(flags); 76 local_irq_save(flags);
77 for_each_cpu(query_cpu, mask) { 77 for_each_cpu(query_cpu, mask) {
@@ -85,16 +85,17 @@ static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
85 85
86static void x2apic_send_IPI_allbutself(int vector) 86static void x2apic_send_IPI_allbutself(int vector)
87{ 87{
88 unsigned long flags;
89 unsigned long query_cpu;
90 unsigned long this_cpu = smp_processor_id(); 88 unsigned long this_cpu = smp_processor_id();
89 unsigned long query_cpu;
90 unsigned long flags;
91 91
92 local_irq_save(flags); 92 local_irq_save(flags);
93 for_each_online_cpu(query_cpu) 93 for_each_online_cpu(query_cpu) {
94 if (query_cpu != this_cpu) 94 if (query_cpu == this_cpu)
95 __x2apic_send_IPI_dest( 95 continue;
96 per_cpu(x86_cpu_to_apicid, query_cpu), 96 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
97 vector, APIC_DEST_PHYSICAL); 97 vector, APIC_DEST_PHYSICAL);
98 }
98 local_irq_restore(flags); 99 local_irq_restore(flags);
99} 100}
100 101
@@ -110,21 +111,21 @@ static int x2apic_apic_id_registered(void)
110 111
111static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) 112static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
112{ 113{
113 int cpu;
114
115 /* 114 /*
116 * We're using fixed IRQ delivery, can only return one phys APIC ID. 115 * We're using fixed IRQ delivery, can only return one phys APIC ID.
117 * May as well be the first. 116 * May as well be the first.
118 */ 117 */
119 cpu = cpumask_first(cpumask); 118 int cpu = cpumask_first(cpumask);
119
120 if ((unsigned)cpu < nr_cpu_ids) 120 if ((unsigned)cpu < nr_cpu_ids)
121 return per_cpu(x86_cpu_to_apicid, cpu); 121 return per_cpu(x86_cpu_to_apicid, cpu);
122 else 122 else
123 return BAD_APICID; 123 return BAD_APICID;
124} 124}
125 125
126static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 126static unsigned int
127 const struct cpumask *andmask) 127x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
128 const struct cpumask *andmask)
128{ 129{
129 int cpu; 130 int cpu;
130 131
@@ -132,31 +133,28 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
132 * We're using fixed IRQ delivery, can only return one phys APIC ID. 133 * We're using fixed IRQ delivery, can only return one phys APIC ID.
133 * May as well be the first. 134 * May as well be the first.
134 */ 135 */
135 for_each_cpu_and(cpu, cpumask, andmask) 136 for_each_cpu_and(cpu, cpumask, andmask) {
136 if (cpumask_test_cpu(cpu, cpu_online_mask)) 137 if (cpumask_test_cpu(cpu, cpu_online_mask))
137 break; 138 break;
139 }
140
138 if (cpu < nr_cpu_ids) 141 if (cpu < nr_cpu_ids)
139 return per_cpu(x86_cpu_to_apicid, cpu); 142 return per_cpu(x86_cpu_to_apicid, cpu);
143
140 return BAD_APICID; 144 return BAD_APICID;
141} 145}
142 146
143static unsigned int get_apic_id(unsigned long x) 147static unsigned int x2apic_phys_get_apic_id(unsigned long x)
144{ 148{
145 unsigned int id; 149 return x;
146
147 id = x;
148 return id;
149} 150}
150 151
151static unsigned long set_apic_id(unsigned int id) 152static unsigned long set_apic_id(unsigned int id)
152{ 153{
153 unsigned long x; 154 return id;
154
155 x = id;
156 return x;
157} 155}
158 156
159static unsigned int phys_pkg_id(int index_msb) 157static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
160{ 158{
161 return current_cpu_data.initial_apicid >> index_msb; 159 return current_cpu_data.initial_apicid >> index_msb;
162} 160}
@@ -168,27 +166,58 @@ static void x2apic_send_IPI_self(int vector)
168 166
169static void init_x2apic_ldr(void) 167static void init_x2apic_ldr(void)
170{ 168{
171 return;
172} 169}
173 170
174struct genapic apic_x2apic_phys = { 171struct genapic apic_x2apic_phys = {
175 .name = "physical x2apic", 172
176 .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, 173 .name = "physical x2apic",
177 .int_delivery_mode = dest_Fixed, 174 .probe = NULL,
178 .int_dest_mode = (APIC_DEST_PHYSICAL != 0), 175 .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
179 .target_cpus = x2apic_target_cpus, 176 .apic_id_registered = x2apic_apic_id_registered,
180 .vector_allocation_domain = x2apic_vector_allocation_domain, 177
181 .apic_id_registered = x2apic_apic_id_registered, 178 .irq_delivery_mode = dest_Fixed,
182 .init_apic_ldr = init_x2apic_ldr, 179 .irq_dest_mode = 0, /* physical */
183 .send_IPI_all = x2apic_send_IPI_all, 180
184 .send_IPI_allbutself = x2apic_send_IPI_allbutself, 181 .target_cpus = x2apic_target_cpus,
185 .send_IPI_mask = x2apic_send_IPI_mask, 182 .disable_esr = 0,
186 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, 183 .dest_logical = 0,
187 .send_IPI_self = x2apic_send_IPI_self, 184 .check_apicid_used = NULL,
188 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, 185 .check_apicid_present = NULL,
189 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, 186
190 .phys_pkg_id = phys_pkg_id, 187 .vector_allocation_domain = x2apic_vector_allocation_domain,
191 .get_apic_id = get_apic_id, 188 .init_apic_ldr = init_x2apic_ldr,
192 .set_apic_id = set_apic_id, 189
193 .apic_id_mask = (0xFFFFFFFFu), 190 .ioapic_phys_id_map = NULL,
191 .setup_apic_routing = NULL,
192 .multi_timer_check = NULL,
193 .apicid_to_node = NULL,
194 .cpu_to_logical_apicid = NULL,
195 .cpu_present_to_apicid = default_cpu_present_to_apicid,
196 .apicid_to_cpu_present = NULL,
197 .setup_portio_remap = NULL,
198 .check_phys_apicid_present = default_check_phys_apicid_present,
199 .enable_apic_mode = NULL,
200 .phys_pkg_id = x2apic_phys_pkg_id,
201 .mps_oem_check = NULL,
202
203 .get_apic_id = x2apic_phys_get_apic_id,
204 .set_apic_id = set_apic_id,
205 .apic_id_mask = 0xFFFFFFFFu,
206
207 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
208 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
209
210 .send_IPI_mask = x2apic_send_IPI_mask,
211 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
212 .send_IPI_allbutself = x2apic_send_IPI_allbutself,
213 .send_IPI_all = x2apic_send_IPI_all,
214 .send_IPI_self = x2apic_send_IPI_self,
215
216 .wakeup_cpu = NULL,
217 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
218 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
219 .wait_for_init_deassert = NULL,
220 .smp_callin_clear_local_apic = NULL,
221 .store_NMI_vector = NULL,
222 .inquire_remote_apic = NULL,
194}; 223};
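[note] The identity get_apic_id/set_apic_id helpers and the 0xFFFFFFFFu apic_id_mask above reflect that x2APIC IDs are full 32-bit values, unlike the xAPIC ID that lives in bits 24-31 (compare flat_get_apic_id earlier). A trivial sketch:

	/* in x2APIC mode the register value is the APIC ID itself */
	static unsigned int x2apic_id_example(unsigned long reg)
	{
		return (unsigned int)(reg & 0xFFFFFFFFu);
	}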
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index b193e082f6ce..6adb5e6f4d92 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -25,6 +25,7 @@
25#include <asm/ipi.h> 25#include <asm/ipi.h>
26#include <asm/genapic.h> 26#include <asm/genapic.h>
27#include <asm/pgtable.h> 27#include <asm/pgtable.h>
28#include <asm/uv/uv.h>
28#include <asm/uv/uv_mmrs.h> 29#include <asm/uv/uv_mmrs.h>
29#include <asm/uv/uv_hub.h> 30#include <asm/uv/uv_hub.h>
30#include <asm/uv/bios.h> 31#include <asm/uv/bios.h>
@@ -117,12 +118,13 @@ static void uv_send_IPI_one(int cpu, int vector)
117 int pnode; 118 int pnode;
118 119
119 apicid = per_cpu(x86_cpu_to_apicid, cpu); 120 apicid = per_cpu(x86_cpu_to_apicid, cpu);
120 lapicid = apicid & 0x3f; /* ZZZ macro needed */ 121 lapicid = apicid & 0x3f; /* ZZZ macro needed */
121 pnode = uv_apicid_to_pnode(apicid); 122 pnode = uv_apicid_to_pnode(apicid);
122 val = 123
123 (1UL << UVH_IPI_INT_SEND_SHFT) | (lapicid << 124 val = ( 1UL << UVH_IPI_INT_SEND_SHFT ) |
124 UVH_IPI_INT_APIC_ID_SHFT) | 125 ( lapicid << UVH_IPI_INT_APIC_ID_SHFT ) |
125 (vector << UVH_IPI_INT_VECTOR_SHFT); 126 ( vector << UVH_IPI_INT_VECTOR_SHFT );
127
126 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 128 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
127} 129}
128 130
@@ -136,22 +138,24 @@ static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
136 138
137static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) 139static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
138{ 140{
139 unsigned int cpu;
140 unsigned int this_cpu = smp_processor_id(); 141 unsigned int this_cpu = smp_processor_id();
142 unsigned int cpu;
141 143
142 for_each_cpu(cpu, mask) 144 for_each_cpu(cpu, mask) {
143 if (cpu != this_cpu) 145 if (cpu != this_cpu)
144 uv_send_IPI_one(cpu, vector); 146 uv_send_IPI_one(cpu, vector);
147 }
145} 148}
146 149
147static void uv_send_IPI_allbutself(int vector) 150static void uv_send_IPI_allbutself(int vector)
148{ 151{
149 unsigned int cpu;
150 unsigned int this_cpu = smp_processor_id(); 152 unsigned int this_cpu = smp_processor_id();
153 unsigned int cpu;
151 154
152 for_each_online_cpu(cpu) 155 for_each_online_cpu(cpu) {
153 if (cpu != this_cpu) 156 if (cpu != this_cpu)
154 uv_send_IPI_one(cpu, vector); 157 uv_send_IPI_one(cpu, vector);
158 }
155} 159}
156 160
157static void uv_send_IPI_all(int vector) 161static void uv_send_IPI_all(int vector)
@@ -170,21 +174,21 @@ static void uv_init_apic_ldr(void)
170 174
171static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask) 175static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
172{ 176{
173 int cpu;
174
175 /* 177 /*
176 * We're using fixed IRQ delivery, can only return one phys APIC ID. 178 * We're using fixed IRQ delivery, can only return one phys APIC ID.
177 * May as well be the first. 179 * May as well be the first.
178 */ 180 */
179 cpu = cpumask_first(cpumask); 181 int cpu = cpumask_first(cpumask);
182
180 if ((unsigned)cpu < nr_cpu_ids) 183 if ((unsigned)cpu < nr_cpu_ids)
181 return per_cpu(x86_cpu_to_apicid, cpu); 184 return per_cpu(x86_cpu_to_apicid, cpu);
182 else 185 else
183 return BAD_APICID; 186 return BAD_APICID;
184} 187}
185 188
186static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 189static unsigned int
187 const struct cpumask *andmask) 190uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
191 const struct cpumask *andmask)
188{ 192{
189 int cpu; 193 int cpu;
190 194
@@ -192,15 +196,17 @@ static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
192 * We're using fixed IRQ delivery, can only return one phys APIC ID. 196 * We're using fixed IRQ delivery, can only return one phys APIC ID.
193 * May as well be the first. 197 * May as well be the first.
194 */ 198 */
195 for_each_cpu_and(cpu, cpumask, andmask) 199 for_each_cpu_and(cpu, cpumask, andmask) {
196 if (cpumask_test_cpu(cpu, cpu_online_mask)) 200 if (cpumask_test_cpu(cpu, cpu_online_mask))
197 break; 201 break;
202 }
198 if (cpu < nr_cpu_ids) 203 if (cpu < nr_cpu_ids)
199 return per_cpu(x86_cpu_to_apicid, cpu); 204 return per_cpu(x86_cpu_to_apicid, cpu);
205
200 return BAD_APICID; 206 return BAD_APICID;
201} 207}
202 208
203static unsigned int get_apic_id(unsigned long x) 209static unsigned int x2apic_get_apic_id(unsigned long x)
204{ 210{
205 unsigned int id; 211 unsigned int id;
206 212
@@ -222,10 +228,10 @@ static unsigned long set_apic_id(unsigned int id)
222static unsigned int uv_read_apic_id(void) 228static unsigned int uv_read_apic_id(void)
223{ 229{
224 230
225 return get_apic_id(apic_read(APIC_ID)); 231 return x2apic_get_apic_id(apic_read(APIC_ID));
226} 232}
227 233
228static unsigned int phys_pkg_id(int index_msb) 234static int uv_phys_pkg_id(int initial_apicid, int index_msb)
229{ 235{
230 return uv_read_apic_id() >> index_msb; 236 return uv_read_apic_id() >> index_msb;
231} 237}
@@ -236,25 +242,57 @@ static void uv_send_IPI_self(int vector)
236} 242}
237 243
238struct genapic apic_x2apic_uv_x = { 244struct genapic apic_x2apic_uv_x = {
239 .name = "UV large system", 245
240 .acpi_madt_oem_check = uv_acpi_madt_oem_check, 246 .name = "UV large system",
241 .int_delivery_mode = dest_Fixed, 247 .probe = NULL,
242 .int_dest_mode = (APIC_DEST_PHYSICAL != 0), 248 .acpi_madt_oem_check = uv_acpi_madt_oem_check,
243 .target_cpus = uv_target_cpus, 249 .apic_id_registered = uv_apic_id_registered,
244 .vector_allocation_domain = uv_vector_allocation_domain, 250
245 .apic_id_registered = uv_apic_id_registered, 251 .irq_delivery_mode = dest_Fixed,
246 .init_apic_ldr = uv_init_apic_ldr, 252 .irq_dest_mode = 1, /* logical */
247 .send_IPI_all = uv_send_IPI_all, 253
248 .send_IPI_allbutself = uv_send_IPI_allbutself, 254 .target_cpus = uv_target_cpus,
249 .send_IPI_mask = uv_send_IPI_mask, 255 .disable_esr = 0,
250 .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself, 256 .dest_logical = APIC_DEST_LOGICAL,
251 .send_IPI_self = uv_send_IPI_self, 257 .check_apicid_used = NULL,
252 .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, 258 .check_apicid_present = NULL,
253 .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and, 259
254 .phys_pkg_id = phys_pkg_id, 260 .vector_allocation_domain = uv_vector_allocation_domain,
255 .get_apic_id = get_apic_id, 261 .init_apic_ldr = uv_init_apic_ldr,
256 .set_apic_id = set_apic_id, 262
257 .apic_id_mask = (0xFFFFFFFFu), 263 .ioapic_phys_id_map = NULL,
264 .setup_apic_routing = NULL,
265 .multi_timer_check = NULL,
266 .apicid_to_node = NULL,
267 .cpu_to_logical_apicid = NULL,
268 .cpu_present_to_apicid = default_cpu_present_to_apicid,
269 .apicid_to_cpu_present = NULL,
270 .setup_portio_remap = NULL,
271 .check_phys_apicid_present = default_check_phys_apicid_present,
272 .enable_apic_mode = NULL,
273 .phys_pkg_id = uv_phys_pkg_id,
274 .mps_oem_check = NULL,
275
276 .get_apic_id = x2apic_get_apic_id,
277 .set_apic_id = set_apic_id,
278 .apic_id_mask = 0xFFFFFFFFu,
279
280 .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
281 .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
282
283 .send_IPI_mask = uv_send_IPI_mask,
284 .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
285 .send_IPI_allbutself = uv_send_IPI_allbutself,
286 .send_IPI_all = uv_send_IPI_all,
287 .send_IPI_self = uv_send_IPI_self,
288
289 .wakeup_cpu = NULL,
290 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
291 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
292 .wait_for_init_deassert = NULL,
293 .smp_callin_clear_local_apic = NULL,
294 .store_NMI_vector = NULL,
295 .inquire_remote_apic = NULL,
258}; 296};
259 297
260static __cpuinit void set_x2apic_extra_bits(int pnode) 298static __cpuinit void set_x2apic_extra_bits(int pnode)
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index b9a4d8c4b935..f5b272247690 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -26,27 +26,6 @@
26#include <asm/bios_ebda.h> 26#include <asm/bios_ebda.h>
27#include <asm/trampoline.h> 27#include <asm/trampoline.h>
28 28
29/* boot cpu pda */
30static struct x8664_pda _boot_cpu_pda;
31
32#ifdef CONFIG_SMP
33/*
34 * We install an empty cpu_pda pointer table to indicate to early users
35 * (numa_set_node) that the cpu_pda pointer table for cpus other than
36 * the boot cpu is not yet setup.
37 */
38static struct x8664_pda *__cpu_pda[NR_CPUS] __initdata;
39#else
40static struct x8664_pda *__cpu_pda[NR_CPUS] __read_mostly;
41#endif
42
43void __init x86_64_init_pda(void)
44{
45 _cpu_pda = __cpu_pda;
46 cpu_pda(0) = &_boot_cpu_pda;
47 pda_init(0);
48}
49
50static void __init zap_identity_mappings(void) 29static void __init zap_identity_mappings(void)
51{ 30{
52 pgd_t *pgd = pgd_offset_k(0UL); 31 pgd_t *pgd = pgd_offset_k(0UL);
@@ -112,8 +91,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
112 if (console_loglevel == 10) 91 if (console_loglevel == 10)
113 early_printk("Kernel alive\n"); 92 early_printk("Kernel alive\n");
114 93
115 x86_64_init_pda();
116
117 x86_64_start_reservations(real_mode_data); 94 x86_64_start_reservations(real_mode_data);
118} 95}
119 96
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index e835b4eea70b..2a0aad7718d5 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -19,6 +19,7 @@
19#include <asm/asm-offsets.h> 19#include <asm/asm-offsets.h>
20#include <asm/setup.h> 20#include <asm/setup.h>
21#include <asm/processor-flags.h> 21#include <asm/processor-flags.h>
22#include <asm/percpu.h>
22 23
23/* Physical address */ 24/* Physical address */
24#define pa(X) ((X) - __PAGE_OFFSET) 25#define pa(X) ((X) - __PAGE_OFFSET)
@@ -429,14 +430,34 @@ is386: movl $2,%ecx # set MP
429 ljmp $(__KERNEL_CS),$1f 430 ljmp $(__KERNEL_CS),$1f
4301: movl $(__KERNEL_DS),%eax # reload all the segment registers 4311: movl $(__KERNEL_DS),%eax # reload all the segment registers
431 movl %eax,%ss # after changing gdt. 432 movl %eax,%ss # after changing gdt.
432 movl %eax,%fs # gets reset once there's real percpu
433 433
434 movl $(__USER_DS),%eax # DS/ES contains default USER segment 434 movl $(__USER_DS),%eax # DS/ES contains default USER segment
435 movl %eax,%ds 435 movl %eax,%ds
436 movl %eax,%es 436 movl %eax,%es
437 437
438 xorl %eax,%eax # Clear GS and LDT 438 movl $(__KERNEL_PERCPU), %eax
439 movl %eax,%fs # set this cpu's percpu
440
441#ifdef CONFIG_CC_STACKPROTECTOR
442 /*
443 * The linker can't handle this by relocation. Manually set
444 * base address in stack canary segment descriptor.
445 */
446 cmpb $0,ready
447 jne 1f
448 movl $per_cpu__gdt_page,%eax
449 movl $per_cpu__stack_canary,%ecx
450 subl $20, %ecx
451 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
452 shrl $16, %ecx
453 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
454 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
4551:
456#endif
457 movl $(__KERNEL_STACK_CANARY),%eax
439 movl %eax,%gs 458 movl %eax,%gs
459
460 xorl %eax,%eax # Clear LDT
440 lldt %ax 461 lldt %ax
441 462
442 cld # gcc2 wants the direction flag cleared at all times 463 cld # gcc2 wants the direction flag cleared at all times
@@ -446,8 +467,6 @@ is386: movl $2,%ecx # set MP
446 movb $1, ready 467 movb $1, ready
447 cmpb $0,%cl # the first CPU calls start_kernel 468 cmpb $0,%cl # the first CPU calls start_kernel
448 je 1f 469 je 1f
449 movl $(__KERNEL_PERCPU), %eax
450 movl %eax,%fs # set this cpu's percpu
451 movl (stack_start), %esp 470 movl (stack_start), %esp
4521: 4711:
453#endif /* CONFIG_SMP */ 472#endif /* CONFIG_SMP */
@@ -548,12 +567,8 @@ early_fault:
548 pushl %eax 567 pushl %eax
549 pushl %edx /* trapno */ 568 pushl %edx /* trapno */
550 pushl $fault_msg 569 pushl $fault_msg
551#ifdef CONFIG_EARLY_PRINTK
552 call early_printk
553#else
554 call printk 570 call printk
555#endif 571#endif
556#endif
557 call dump_stack 572 call dump_stack
558hlt_loop: 573hlt_loop:
559 hlt 574 hlt
@@ -580,11 +595,10 @@ ignore_int:
580 pushl 32(%esp) 595 pushl 32(%esp)
581 pushl 40(%esp) 596 pushl 40(%esp)
582 pushl $int_msg 597 pushl $int_msg
583#ifdef CONFIG_EARLY_PRINTK
584 call early_printk
585#else
586 call printk 598 call printk
587#endif 599
600 call dump_stack
601
588 addl $(5*4),%esp 602 addl $(5*4),%esp
589 popl %ds 603 popl %ds
590 popl %es 604 popl %es
@@ -660,7 +674,7 @@ early_recursion_flag:
660 .long 0 674 .long 0
661 675
662int_msg: 676int_msg:
663 .asciz "Unknown interrupt or fault at EIP %p %p %p\n" 677 .asciz "Unknown interrupt or fault at: %p %p %p\n"
664 678
665fault_msg: 679fault_msg:
666/* fault info: */ 680/* fault info: */
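[note] The CONFIG_CC_STACKPROTECTOR stanza added above patches the canary segment descriptor by hand because the linker cannot relocate a split base field: an x86 descriptor scatters its 32-bit base across bytes 2-4 and byte 7, and the subl $20 biases the base so that gcc's i386 canary access at %gs:20 lands on per_cpu__stack_canary (the %gs:20 offset is stated here as an assumption). The movw/movb sequence, in C form:

	/* write a 32-bit base into an 8-byte x86 segment descriptor */
	static void set_desc_base_example(unsigned char *desc, unsigned long base)
	{
		desc[2] = base & 0xff;			/* base  7:0  */
		desc[3] = (base >> 8) & 0xff;		/* base 15:8  */
		desc[4] = (base >> 16) & 0xff;		/* base 23:16 */
		desc[7] = (base >> 24) & 0xff;		/* base 31:24 */
	}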
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 0e275d495563..2e648e3a5ea4 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -19,6 +19,7 @@
19#include <asm/msr.h> 19#include <asm/msr.h>
20#include <asm/cache.h> 20#include <asm/cache.h>
21#include <asm/processor-flags.h> 21#include <asm/processor-flags.h>
22#include <asm/percpu.h>
22 23
23#ifdef CONFIG_PARAVIRT 24#ifdef CONFIG_PARAVIRT
24#include <asm/asm-offsets.h> 25#include <asm/asm-offsets.h>
@@ -226,12 +227,15 @@ ENTRY(secondary_startup_64)
226 movl %eax,%fs 227 movl %eax,%fs
227 movl %eax,%gs 228 movl %eax,%gs
228 229
229 /* 230 /* Set up %gs.
230 * Setup up a dummy PDA. this is just for some early bootup code 231 *
231 * that does in_interrupt() 232 * The base of %gs always points to the bottom of the irqstack
232 */ 233 * union. If the stack protector canary is enabled, it is
234 * located at %gs:40. Note that, on SMP, the boot cpu uses
235 * init data section till per cpu areas are set up.
236 */
233 movl $MSR_GS_BASE,%ecx 237 movl $MSR_GS_BASE,%ecx
234 movq $empty_zero_page,%rax 238 movq initial_gs(%rip),%rax
235 movq %rax,%rdx 239 movq %rax,%rdx
236 shrq $32,%rdx 240 shrq $32,%rdx
237 wrmsr 241 wrmsr
@@ -257,6 +261,8 @@ ENTRY(secondary_startup_64)
257 .align 8 261 .align 8
258 ENTRY(initial_code) 262 ENTRY(initial_code)
259 .quad x86_64_start_kernel 263 .quad x86_64_start_kernel
264 ENTRY(initial_gs)
265 .quad INIT_PER_CPU_VAR(irq_stack_union)
260 __FINITDATA 266 __FINITDATA
261 267
262 ENTRY(stack_start) 268 ENTRY(stack_start)
@@ -401,7 +407,8 @@ NEXT_PAGE(level2_spare_pgt)
401 .globl early_gdt_descr 407 .globl early_gdt_descr
402early_gdt_descr: 408early_gdt_descr:
403 .word GDT_ENTRIES*8-1 409 .word GDT_ENTRIES*8-1
404 .quad per_cpu__gdt_page 410early_gdt_descr_base:
411 .quad INIT_PER_CPU_VAR(gdt_page)
405 412
406ENTRY(phys_base) 413ENTRY(phys_base)
407 /* This must match the first entry in level2_kernel_pgt */ 414 /* This must match the first entry in level2_kernel_pgt */
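
A note on the secondary_startup_64 hunk: WRMSR takes the MSR number in
%ecx and the 64-bit payload split across %edx:%eax, which is why the
code copies %rax to %rdx and shifts right by 32 before the instruction.
Pointing initial_gs at the init-section irq_stack_union gives the boot
cpu a working %gs base (and a stack canary at %gs:40) before the real
per-cpu areas exist. A hedged C equivalent of the MSR write (the MSR
number is architectural; the wrapper is illustrative and, being a
privileged instruction, only meaningful in ring 0):

    #include <stdint.h>

    #define MSR_GS_BASE 0xc0000101u      /* architectural x86-64 MSR */

    /* Low half goes in EAX, high half in EDX, MSR number in ECX. */
    static inline void wrmsr64(uint32_t msr, uint64_t val)
    {
            uint32_t lo = (uint32_t)val;
            uint32_t hi = (uint32_t)(val >> 32);

            __asm__ volatile("wrmsr" :: "c"(msr), "a"(lo), "d"(hi));
    }
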
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index bc7ac4da90d7..7248ca11bdcd 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Intel IO-APIC support for multi-Pentium hosts. 2 * Intel IO-APIC support for multi-Pentium hosts.
3 * 3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo 4 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
5 * 5 *
6 * Many thanks to Stig Venaas for trying out countless experimental 6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently! 7 * patches and reporting/debugging problems patiently!
@@ -46,6 +46,7 @@
46#include <asm/idle.h> 46#include <asm/idle.h>
47#include <asm/io.h> 47#include <asm/io.h>
48#include <asm/smp.h> 48#include <asm/smp.h>
49#include <asm/cpu.h>
49#include <asm/desc.h> 50#include <asm/desc.h>
50#include <asm/proto.h> 51#include <asm/proto.h>
51#include <asm/acpi.h> 52#include <asm/acpi.h>
@@ -61,9 +62,7 @@
61#include <asm/uv/uv_hub.h> 62#include <asm/uv/uv_hub.h>
62#include <asm/uv/uv_irq.h> 63#include <asm/uv/uv_irq.h>
63 64
64#include <mach_ipi.h> 65#include <asm/genapic.h>
65#include <mach_apic.h>
66#include <mach_apicdef.h>
67 66
68#define __apicdebuginit(type) static type __init 67#define __apicdebuginit(type) static type __init
69 68
@@ -82,11 +81,11 @@ static DEFINE_SPINLOCK(vector_lock);
82int nr_ioapic_registers[MAX_IO_APICS]; 81int nr_ioapic_registers[MAX_IO_APICS];
83 82
84/* I/O APIC entries */ 83/* I/O APIC entries */
85struct mp_config_ioapic mp_ioapics[MAX_IO_APICS]; 84struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
86int nr_ioapics; 85int nr_ioapics;
87 86
88/* MP IRQ source entries */ 87/* MP IRQ source entries */
89struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; 88struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
90 89
91/* # of MP IRQ source entries */ 90/* # of MP IRQ source entries */
92int mp_irq_entries; 91int mp_irq_entries;
@@ -99,10 +98,19 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
99 98
100int skip_ioapic_setup; 99int skip_ioapic_setup;
101 100
101void arch_disable_smp_support(void)
102{
103#ifdef CONFIG_PCI
104 noioapicquirk = 1;
105 noioapicreroute = -1;
106#endif
107 skip_ioapic_setup = 1;
108}
109
102static int __init parse_noapic(char *str) 110static int __init parse_noapic(char *str)
103{ 111{
104 /* disable IO-APIC */ 112 /* disable IO-APIC */
105 disable_ioapic_setup(); 113 arch_disable_smp_support();
106 return 0; 114 return 0;
107} 115}
108early_param("noapic", parse_noapic); 116early_param("noapic", parse_noapic);
@@ -356,7 +364,7 @@ set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
356 364
357 if (!cfg->move_in_progress) { 365 if (!cfg->move_in_progress) {
358 /* it means that domain is not changed */ 366 /* it means that domain is not changed */
359 if (!cpumask_intersects(&desc->affinity, mask)) 367 if (!cpumask_intersects(desc->affinity, mask))
360 cfg->move_desc_pending = 1; 368 cfg->move_desc_pending = 1;
361 } 369 }
362} 370}
@@ -386,7 +394,7 @@ struct io_apic {
386static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) 394static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
387{ 395{
388 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx) 396 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
389 + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK); 397 + (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
390} 398}
391 399
392static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) 400static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
@@ -478,7 +486,7 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
478 io_apic_write(apic, 0x10 + 2*pin, eu.w1); 486 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
479} 487}
480 488
481static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 489void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
482{ 490{
483 unsigned long flags; 491 unsigned long flags;
484 spin_lock_irqsave(&ioapic_lock, flags); 492 spin_lock_irqsave(&ioapic_lock, flags);
@@ -513,11 +521,11 @@ static void send_cleanup_vector(struct irq_cfg *cfg)
513 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) 521 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
514 cfg->move_cleanup_count++; 522 cfg->move_cleanup_count++;
515 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) 523 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
516 send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); 524 apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
517 } else { 525 } else {
518 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); 526 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
519 cfg->move_cleanup_count = cpumask_weight(cleanup_mask); 527 cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
520 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); 528 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
521 free_cpumask_var(cleanup_mask); 529 free_cpumask_var(cleanup_mask);
522 } 530 }
523 cfg->move_in_progress = 0; 531 cfg->move_in_progress = 0;
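
This hunk shows the conversion pattern that repeats through the rest of
the file: direct calls such as send_IPI_mask() and compile-time macros
such as TARGET_CPUS, INT_DELIVERY_MODE and INT_DEST_MODE become
indirections through a single global 'apic' pointer to the driver for
the APIC model chosen at boot. Schematically, with abridged and partly
hypothetical field names (the real structure is the one asm/genapic.h
now provides):

    struct cpumask;                     /* opaque here */

    /* One ops structure per APIC model (flat, bigsmp, es7000, ...). */
    struct apic_sketch {
            const char *name;
            int irq_delivery_mode;      /* replaces INT_DELIVERY_MODE */
            int irq_dest_mode;          /* replaces INT_DEST_MODE     */
            const struct cpumask *(*target_cpus)(void); /* TARGET_CPUS */
            void (*send_IPI_mask)(const struct cpumask *mask, int vector);
            unsigned int (*cpu_mask_to_apicid)(const struct cpumask *mask);
    };

    /* A single global, set once during boot, selects the model: */
    extern struct apic_sketch *apic;

    static void retrigger_sketch(const struct cpumask *one_cpu, int vec)
    {
            apic->send_IPI_mask(one_cpu, vec);  /* indirect call */
    }
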
@@ -562,8 +570,9 @@ static int
562assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); 570assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
563 571
564/* 572/*
565 * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid 573 * Either sets desc->affinity to a valid value, and returns
566 * of that, or returns BAD_APICID and leaves desc->affinity untouched. 574 * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
575 * leaves desc->affinity untouched.
567 */ 576 */
568static unsigned int 577static unsigned int
569set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) 578set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
@@ -579,9 +588,10 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
579 if (assign_irq_vector(irq, cfg, mask)) 588 if (assign_irq_vector(irq, cfg, mask))
580 return BAD_APICID; 589 return BAD_APICID;
581 590
582 cpumask_and(&desc->affinity, cfg->domain, mask); 591 cpumask_and(desc->affinity, cfg->domain, mask);
583 set_extra_move_desc(desc, mask); 592 set_extra_move_desc(desc, mask);
584 return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask); 593
594 return apic->cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
585} 595}
586 596
587static void 597static void
@@ -796,23 +806,6 @@ static void clear_IO_APIC (void)
796 clear_IO_APIC_pin(apic, pin); 806 clear_IO_APIC_pin(apic, pin);
797} 807}
798 808
799#if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
800void send_IPI_self(int vector)
801{
802 unsigned int cfg;
803
804 /*
805 * Wait for idle.
806 */
807 apic_wait_icr_idle();
808 cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
809 /*
810 * Send the IPI. The write to APIC_ICR fires this off.
811 */
812 apic_write(APIC_ICR, cfg);
813}
814#endif /* !CONFIG_SMP && CONFIG_X86_32*/
815
816#ifdef CONFIG_X86_32 809#ifdef CONFIG_X86_32
817/* 810/*
818 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to 811 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
@@ -944,10 +937,10 @@ static int find_irq_entry(int apic, int pin, int type)
944 int i; 937 int i;
945 938
946 for (i = 0; i < mp_irq_entries; i++) 939 for (i = 0; i < mp_irq_entries; i++)
947 if (mp_irqs[i].mp_irqtype == type && 940 if (mp_irqs[i].irqtype == type &&
948 (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid || 941 (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
949 mp_irqs[i].mp_dstapic == MP_APIC_ALL) && 942 mp_irqs[i].dstapic == MP_APIC_ALL) &&
950 mp_irqs[i].mp_dstirq == pin) 943 mp_irqs[i].dstirq == pin)
951 return i; 944 return i;
952 945
953 return -1; 946 return -1;
@@ -961,13 +954,13 @@ static int __init find_isa_irq_pin(int irq, int type)
961 int i; 954 int i;
962 955
963 for (i = 0; i < mp_irq_entries; i++) { 956 for (i = 0; i < mp_irq_entries; i++) {
964 int lbus = mp_irqs[i].mp_srcbus; 957 int lbus = mp_irqs[i].srcbus;
965 958
966 if (test_bit(lbus, mp_bus_not_pci) && 959 if (test_bit(lbus, mp_bus_not_pci) &&
967 (mp_irqs[i].mp_irqtype == type) && 960 (mp_irqs[i].irqtype == type) &&
968 (mp_irqs[i].mp_srcbusirq == irq)) 961 (mp_irqs[i].srcbusirq == irq))
969 962
970 return mp_irqs[i].mp_dstirq; 963 return mp_irqs[i].dstirq;
971 } 964 }
972 return -1; 965 return -1;
973} 966}
@@ -977,17 +970,17 @@ static int __init find_isa_irq_apic(int irq, int type)
977 int i; 970 int i;
978 971
979 for (i = 0; i < mp_irq_entries; i++) { 972 for (i = 0; i < mp_irq_entries; i++) {
980 int lbus = mp_irqs[i].mp_srcbus; 973 int lbus = mp_irqs[i].srcbus;
981 974
982 if (test_bit(lbus, mp_bus_not_pci) && 975 if (test_bit(lbus, mp_bus_not_pci) &&
983 (mp_irqs[i].mp_irqtype == type) && 976 (mp_irqs[i].irqtype == type) &&
984 (mp_irqs[i].mp_srcbusirq == irq)) 977 (mp_irqs[i].srcbusirq == irq))
985 break; 978 break;
986 } 979 }
987 if (i < mp_irq_entries) { 980 if (i < mp_irq_entries) {
988 int apic; 981 int apic;
989 for(apic = 0; apic < nr_ioapics; apic++) { 982 for(apic = 0; apic < nr_ioapics; apic++) {
990 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic) 983 if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
991 return apic; 984 return apic;
992 } 985 }
993 } 986 }
@@ -1012,23 +1005,23 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
1012 return -1; 1005 return -1;
1013 } 1006 }
1014 for (i = 0; i < mp_irq_entries; i++) { 1007 for (i = 0; i < mp_irq_entries; i++) {
1015 int lbus = mp_irqs[i].mp_srcbus; 1008 int lbus = mp_irqs[i].srcbus;
1016 1009
1017 for (apic = 0; apic < nr_ioapics; apic++) 1010 for (apic = 0; apic < nr_ioapics; apic++)
1018 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic || 1011 if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
1019 mp_irqs[i].mp_dstapic == MP_APIC_ALL) 1012 mp_irqs[i].dstapic == MP_APIC_ALL)
1020 break; 1013 break;
1021 1014
1022 if (!test_bit(lbus, mp_bus_not_pci) && 1015 if (!test_bit(lbus, mp_bus_not_pci) &&
1023 !mp_irqs[i].mp_irqtype && 1016 !mp_irqs[i].irqtype &&
1024 (bus == lbus) && 1017 (bus == lbus) &&
1025 (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) { 1018 (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
1026 int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq); 1019 int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);
1027 1020
1028 if (!(apic || IO_APIC_IRQ(irq))) 1021 if (!(apic || IO_APIC_IRQ(irq)))
1029 continue; 1022 continue;
1030 1023
1031 if (pin == (mp_irqs[i].mp_srcbusirq & 3)) 1024 if (pin == (mp_irqs[i].srcbusirq & 3))
1032 return irq; 1025 return irq;
1033 /* 1026 /*
1034 * Use the first all-but-pin matching entry as a 1027 * Use the first all-but-pin matching entry as a
@@ -1071,7 +1064,7 @@ static int EISA_ELCR(unsigned int irq)
1071 * EISA conforming in the MP table, that means its trigger type must 1064 * EISA conforming in the MP table, that means its trigger type must
1072 * be read in from the ELCR */ 1065 * be read in from the ELCR */
1073 1066
1074#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq)) 1067#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq))
1075#define default_EISA_polarity(idx) default_ISA_polarity(idx) 1068#define default_EISA_polarity(idx) default_ISA_polarity(idx)
1076 1069
1077/* PCI interrupts are always polarity one level triggered, 1070/* PCI interrupts are always polarity one level triggered,
@@ -1088,13 +1081,13 @@ static int EISA_ELCR(unsigned int irq)
1088 1081
1089static int MPBIOS_polarity(int idx) 1082static int MPBIOS_polarity(int idx)
1090{ 1083{
1091 int bus = mp_irqs[idx].mp_srcbus; 1084 int bus = mp_irqs[idx].srcbus;
1092 int polarity; 1085 int polarity;
1093 1086
1094 /* 1087 /*
1095 * Determine IRQ line polarity (high active or low active): 1088 * Determine IRQ line polarity (high active or low active):
1096 */ 1089 */
1097 switch (mp_irqs[idx].mp_irqflag & 3) 1090 switch (mp_irqs[idx].irqflag & 3)
1098 { 1091 {
1099 case 0: /* conforms, ie. bus-type dependent polarity */ 1092 case 0: /* conforms, ie. bus-type dependent polarity */
1100 if (test_bit(bus, mp_bus_not_pci)) 1093 if (test_bit(bus, mp_bus_not_pci))
@@ -1130,13 +1123,13 @@ static int MPBIOS_polarity(int idx)
1130 1123
1131static int MPBIOS_trigger(int idx) 1124static int MPBIOS_trigger(int idx)
1132{ 1125{
1133 int bus = mp_irqs[idx].mp_srcbus; 1126 int bus = mp_irqs[idx].srcbus;
1134 int trigger; 1127 int trigger;
1135 1128
1136 /* 1129 /*
1137 * Determine IRQ trigger mode (edge or level sensitive): 1130 * Determine IRQ trigger mode (edge or level sensitive):
1138 */ 1131 */
1139 switch ((mp_irqs[idx].mp_irqflag>>2) & 3) 1132 switch ((mp_irqs[idx].irqflag>>2) & 3)
1140 { 1133 {
1141 case 0: /* conforms, ie. bus-type dependent */ 1134 case 0: /* conforms, ie. bus-type dependent */
1142 if (test_bit(bus, mp_bus_not_pci)) 1135 if (test_bit(bus, mp_bus_not_pci))
@@ -1214,16 +1207,16 @@ int (*ioapic_renumber_irq)(int ioapic, int irq);
1214static int pin_2_irq(int idx, int apic, int pin) 1207static int pin_2_irq(int idx, int apic, int pin)
1215{ 1208{
1216 int irq, i; 1209 int irq, i;
1217 int bus = mp_irqs[idx].mp_srcbus; 1210 int bus = mp_irqs[idx].srcbus;
1218 1211
1219 /* 1212 /*
1220 * Debugging check, we are in big trouble if this message pops up! 1213 * Debugging check, we are in big trouble if this message pops up!
1221 */ 1214 */
1222 if (mp_irqs[idx].mp_dstirq != pin) 1215 if (mp_irqs[idx].dstirq != pin)
1223 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); 1216 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
1224 1217
1225 if (test_bit(bus, mp_bus_not_pci)) { 1218 if (test_bit(bus, mp_bus_not_pci)) {
1226 irq = mp_irqs[idx].mp_srcbusirq; 1219 irq = mp_irqs[idx].srcbusirq;
1227 } else { 1220 } else {
1228 /* 1221 /*
1229 * PCI IRQs are mapped in order 1222 * PCI IRQs are mapped in order
@@ -1315,7 +1308,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1315 int new_cpu; 1308 int new_cpu;
1316 int vector, offset; 1309 int vector, offset;
1317 1310
1318 vector_allocation_domain(cpu, tmp_mask); 1311 apic->vector_allocation_domain(cpu, tmp_mask);
1319 1312
1320 vector = current_vector; 1313 vector = current_vector;
1321 offset = current_offset; 1314 offset = current_offset;
@@ -1485,10 +1478,10 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t
1485 handle_edge_irq, "edge"); 1478 handle_edge_irq, "edge");
1486} 1479}
1487 1480
1488static int setup_ioapic_entry(int apic, int irq, 1481int setup_ioapic_entry(int apic_id, int irq,
1489 struct IO_APIC_route_entry *entry, 1482 struct IO_APIC_route_entry *entry,
1490 unsigned int destination, int trigger, 1483 unsigned int destination, int trigger,
1491 int polarity, int vector) 1484 int polarity, int vector)
1492{ 1485{
1493 /* 1486 /*
1494 * add it to the IO-APIC irq-routing table: 1487 * add it to the IO-APIC irq-routing table:
@@ -1497,25 +1490,25 @@ static int setup_ioapic_entry(int apic, int irq,
1497 1490
1498#ifdef CONFIG_INTR_REMAP 1491#ifdef CONFIG_INTR_REMAP
1499 if (intr_remapping_enabled) { 1492 if (intr_remapping_enabled) {
1500 struct intel_iommu *iommu = map_ioapic_to_ir(apic); 1493 struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
1501 struct irte irte; 1494 struct irte irte;
1502 struct IR_IO_APIC_route_entry *ir_entry = 1495 struct IR_IO_APIC_route_entry *ir_entry =
1503 (struct IR_IO_APIC_route_entry *) entry; 1496 (struct IR_IO_APIC_route_entry *) entry;
1504 int index; 1497 int index;
1505 1498
1506 if (!iommu) 1499 if (!iommu)
1507 panic("No mapping iommu for ioapic %d\n", apic); 1500 panic("No mapping iommu for ioapic %d\n", apic_id);
1508 1501
1509 index = alloc_irte(iommu, irq, 1); 1502 index = alloc_irte(iommu, irq, 1);
1510 if (index < 0) 1503 if (index < 0)
1511 panic("Failed to allocate IRTE for ioapic %d\n", apic); 1504 panic("Failed to allocate IRTE for ioapic %d\n", apic_id);
1512 1505
1513 memset(&irte, 0, sizeof(irte)); 1506 memset(&irte, 0, sizeof(irte));
1514 1507
1515 irte.present = 1; 1508 irte.present = 1;
1516 irte.dst_mode = INT_DEST_MODE; 1509 irte.dst_mode = apic->irq_dest_mode;
1517 irte.trigger_mode = trigger; 1510 irte.trigger_mode = trigger;
1518 irte.dlvry_mode = INT_DELIVERY_MODE; 1511 irte.dlvry_mode = apic->irq_delivery_mode;
1519 irte.vector = vector; 1512 irte.vector = vector;
1520 irte.dest_id = IRTE_DEST(destination); 1513 irte.dest_id = IRTE_DEST(destination);
1521 1514
@@ -1528,8 +1521,8 @@ static int setup_ioapic_entry(int apic, int irq,
1528 } else 1521 } else
1529#endif 1522#endif
1530 { 1523 {
1531 entry->delivery_mode = INT_DELIVERY_MODE; 1524 entry->delivery_mode = apic->irq_delivery_mode;
1532 entry->dest_mode = INT_DEST_MODE; 1525 entry->dest_mode = apic->irq_dest_mode;
1533 entry->dest = destination; 1526 entry->dest = destination;
1534 } 1527 }
1535 1528
@@ -1546,7 +1539,7 @@ static int setup_ioapic_entry(int apic, int irq,
1546 return 0; 1539 return 0;
1547} 1540}
1548 1541
1549static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc, 1542static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc,
1550 int trigger, int polarity) 1543 int trigger, int polarity)
1551{ 1544{
1552 struct irq_cfg *cfg; 1545 struct irq_cfg *cfg;
@@ -1558,22 +1551,22 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
1558 1551
1559 cfg = desc->chip_data; 1552 cfg = desc->chip_data;
1560 1553
1561 if (assign_irq_vector(irq, cfg, TARGET_CPUS)) 1554 if (assign_irq_vector(irq, cfg, apic->target_cpus()))
1562 return; 1555 return;
1563 1556
1564 dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); 1557 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
1565 1558
1566 apic_printk(APIC_VERBOSE,KERN_DEBUG 1559 apic_printk(APIC_VERBOSE,KERN_DEBUG
1567 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " 1560 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1568 "IRQ %d Mode:%i Active:%i)\n", 1561 "IRQ %d Mode:%i Active:%i)\n",
1569 apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector, 1562 apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
1570 irq, trigger, polarity); 1563 irq, trigger, polarity);
1571 1564
1572 1565
1573 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, 1566 if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
1574 dest, trigger, polarity, cfg->vector)) { 1567 dest, trigger, polarity, cfg->vector)) {
1575 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", 1568 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1576 mp_ioapics[apic].mp_apicid, pin); 1569 mp_ioapics[apic_id].apicid, pin);
1577 __clear_irq_vector(irq, cfg); 1570 __clear_irq_vector(irq, cfg);
1578 return; 1571 return;
1579 } 1572 }
@@ -1582,12 +1575,12 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
1582 if (irq < NR_IRQS_LEGACY) 1575 if (irq < NR_IRQS_LEGACY)
1583 disable_8259A_irq(irq); 1576 disable_8259A_irq(irq);
1584 1577
1585 ioapic_write_entry(apic, pin, entry); 1578 ioapic_write_entry(apic_id, pin, entry);
1586} 1579}
1587 1580
1588static void __init setup_IO_APIC_irqs(void) 1581static void __init setup_IO_APIC_irqs(void)
1589{ 1582{
1590 int apic, pin, idx, irq; 1583 int apic_id, pin, idx, irq;
1591 int notcon = 0; 1584 int notcon = 0;
1592 struct irq_desc *desc; 1585 struct irq_desc *desc;
1593 struct irq_cfg *cfg; 1586 struct irq_cfg *cfg;
@@ -1595,21 +1588,19 @@ static void __init setup_IO_APIC_irqs(void)
1595 1588
1596 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); 1589 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1597 1590
1598 for (apic = 0; apic < nr_ioapics; apic++) { 1591 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
1599 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { 1592 for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
1600 1593
1601 idx = find_irq_entry(apic, pin, mp_INT); 1594 idx = find_irq_entry(apic_id, pin, mp_INT);
1602 if (idx == -1) { 1595 if (idx == -1) {
1603 if (!notcon) { 1596 if (!notcon) {
1604 notcon = 1; 1597 notcon = 1;
1605 apic_printk(APIC_VERBOSE, 1598 apic_printk(APIC_VERBOSE,
1606 KERN_DEBUG " %d-%d", 1599 KERN_DEBUG " %d-%d",
1607 mp_ioapics[apic].mp_apicid, 1600 mp_ioapics[apic_id].apicid, pin);
1608 pin);
1609 } else 1601 } else
1610 apic_printk(APIC_VERBOSE, " %d-%d", 1602 apic_printk(APIC_VERBOSE, " %d-%d",
1611 mp_ioapics[apic].mp_apicid, 1603 mp_ioapics[apic_id].apicid, pin);
1612 pin);
1613 continue; 1604 continue;
1614 } 1605 }
1615 if (notcon) { 1606 if (notcon) {
@@ -1618,20 +1609,25 @@ static void __init setup_IO_APIC_irqs(void)
1618 notcon = 0; 1609 notcon = 0;
1619 } 1610 }
1620 1611
1621 irq = pin_2_irq(idx, apic, pin); 1612 irq = pin_2_irq(idx, apic_id, pin);
1622#ifdef CONFIG_X86_32 1613
1623 if (multi_timer_check(apic, irq)) 1614 /*
1615 * Skip the timer IRQ if there's a quirk handler
1616 * installed and if it returns 1:
1617 */
1618 if (apic->multi_timer_check &&
1619 apic->multi_timer_check(apic_id, irq))
1624 continue; 1620 continue;
1625#endif 1621
1626 desc = irq_to_desc_alloc_cpu(irq, cpu); 1622 desc = irq_to_desc_alloc_cpu(irq, cpu);
1627 if (!desc) { 1623 if (!desc) {
1628 printk(KERN_INFO "can not get irq_desc for %d\n", irq); 1624 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
1629 continue; 1625 continue;
1630 } 1626 }
1631 cfg = desc->chip_data; 1627 cfg = desc->chip_data;
1632 add_pin_to_irq_cpu(cfg, cpu, apic, pin); 1628 add_pin_to_irq_cpu(cfg, cpu, apic_id, pin);
1633 1629
1634 setup_IO_APIC_irq(apic, pin, irq, desc, 1630 setup_IO_APIC_irq(apic_id, pin, irq, desc,
1635 irq_trigger(idx), irq_polarity(idx)); 1631 irq_trigger(idx), irq_polarity(idx));
1636 } 1632 }
1637 } 1633 }
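
The multi_timer_check conversion just above also shows how optional
hooks work after this change: a NULL function pointer means "no quirk
handler installed", so call sites test the pointer before invoking it.
The guard, reduced to plain C (a sketch; the hook is implemented by
only a few APIC models):

    struct timer_quirk_sketch {
            int (*multi_timer_check)(int apic_id, int irq); /* may be NULL */
    };

    static int skip_timer_pin(const struct timer_quirk_sketch *ops,
                              int apic_id, int irq)
    {
            return ops->multi_timer_check &&
                   ops->multi_timer_check(apic_id, irq);
    }
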
@@ -1644,7 +1640,7 @@ static void __init setup_IO_APIC_irqs(void)
1644/* 1640/*
1645 * Set up the timer pin, possibly with the 8259A-master behind. 1641 * Set up the timer pin, possibly with the 8259A-master behind.
1646 */ 1642 */
1647static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin, 1643static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
1648 int vector) 1644 int vector)
1649{ 1645{
1650 struct IO_APIC_route_entry entry; 1646 struct IO_APIC_route_entry entry;
@@ -1660,10 +1656,10 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1660 * We use logical delivery to get the timer IRQ 1656 * We use logical delivery to get the timer IRQ
1661 * to the first CPU. 1657 * to the first CPU.
1662 */ 1658 */
1663 entry.dest_mode = INT_DEST_MODE; 1659 entry.dest_mode = apic->irq_dest_mode;
1664 entry.mask = 1; /* mask IRQ now */ 1660 entry.mask = 0; /* don't mask IRQ for edge */
1665 entry.dest = cpu_mask_to_apicid(TARGET_CPUS); 1661 entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
1666 entry.delivery_mode = INT_DELIVERY_MODE; 1662 entry.delivery_mode = apic->irq_delivery_mode;
1667 entry.polarity = 0; 1663 entry.polarity = 0;
1668 entry.trigger = 0; 1664 entry.trigger = 0;
1669 entry.vector = vector; 1665 entry.vector = vector;
@@ -1677,7 +1673,7 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1677 /* 1673 /*
1678 * Add it to the IO-APIC irq-routing table: 1674 * Add it to the IO-APIC irq-routing table:
1679 */ 1675 */
1680 ioapic_write_entry(apic, pin, entry); 1676 ioapic_write_entry(apic_id, pin, entry);
1681} 1677}
1682 1678
1683 1679
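
For reference while reading the entry setup above, this is roughly the
layout being filled in, an abridged sketch of the IO-APIC
redirection-table entry (check the real IO_APIC_route_entry definition
before relying on it):

    struct rte_sketch {
            unsigned vector          : 8;  /* IDT vector to deliver    */
            unsigned delivery_mode   : 3;  /* fixed, lowest-prio, ...  */
            unsigned dest_mode       : 1;  /* 0 physical, 1 logical    */
            unsigned delivery_status : 1;
            unsigned polarity        : 1;
            unsigned irr             : 1;
            unsigned trigger         : 1;  /* 0 edge, 1 level          */
            unsigned mask            : 1;  /* left 0 above: masking
                                              can lose an edge         */
            /* ...reserved bits and an 8-bit destination follow... */
    };
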
@@ -1699,7 +1695,7 @@ __apicdebuginit(void) print_IO_APIC(void)
1699 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); 1695 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1700 for (i = 0; i < nr_ioapics; i++) 1696 for (i = 0; i < nr_ioapics; i++)
1701 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", 1697 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1702 mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]); 1698 mp_ioapics[i].apicid, nr_ioapic_registers[i]);
1703 1699
1704 /* 1700 /*
1705 * We are a bit conservative about what we expect. We have to 1701 * We are a bit conservative about what we expect. We have to
@@ -1719,7 +1715,7 @@ __apicdebuginit(void) print_IO_APIC(void)
1719 spin_unlock_irqrestore(&ioapic_lock, flags); 1715 spin_unlock_irqrestore(&ioapic_lock, flags);
1720 1716
1721 printk("\n"); 1717 printk("\n");
1722 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid); 1718 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
1723 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); 1719 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1724 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); 1720 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1725 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); 1721 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
@@ -2090,7 +2086,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
2090{ 2086{
2091 union IO_APIC_reg_00 reg_00; 2087 union IO_APIC_reg_00 reg_00;
2092 physid_mask_t phys_id_present_map; 2088 physid_mask_t phys_id_present_map;
2093 int apic; 2089 int apic_id;
2094 int i; 2090 int i;
2095 unsigned char old_id; 2091 unsigned char old_id;
2096 unsigned long flags; 2092 unsigned long flags;
@@ -2109,26 +2105,26 @@ static void __init setup_ioapic_ids_from_mpc(void)
2109 * This is broken; anything with a real cpu count has to 2105 * This is broken; anything with a real cpu count has to
2110 * circumvent this idiocy regardless. 2106 * circumvent this idiocy regardless.
2111 */ 2107 */
2112 phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map); 2108 phys_id_present_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
2113 2109
2114 /* 2110 /*
2115 * Set the IOAPIC ID to the value stored in the MPC table. 2111 * Set the IOAPIC ID to the value stored in the MPC table.
2116 */ 2112 */
2117 for (apic = 0; apic < nr_ioapics; apic++) { 2113 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
2118 2114
2119 /* Read the register 0 value */ 2115 /* Read the register 0 value */
2120 spin_lock_irqsave(&ioapic_lock, flags); 2116 spin_lock_irqsave(&ioapic_lock, flags);
2121 reg_00.raw = io_apic_read(apic, 0); 2117 reg_00.raw = io_apic_read(apic_id, 0);
2122 spin_unlock_irqrestore(&ioapic_lock, flags); 2118 spin_unlock_irqrestore(&ioapic_lock, flags);
2123 2119
2124 old_id = mp_ioapics[apic].mp_apicid; 2120 old_id = mp_ioapics[apic_id].apicid;
2125 2121
2126 if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) { 2122 if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
2127 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", 2123 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
2128 apic, mp_ioapics[apic].mp_apicid); 2124 apic_id, mp_ioapics[apic_id].apicid);
2129 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", 2125 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2130 reg_00.bits.ID); 2126 reg_00.bits.ID);
2131 mp_ioapics[apic].mp_apicid = reg_00.bits.ID; 2127 mp_ioapics[apic_id].apicid = reg_00.bits.ID;
2132 } 2128 }
2133 2129
2134 /* 2130 /*
@@ -2136,10 +2132,10 @@ static void __init setup_ioapic_ids_from_mpc(void)
2136 * system must have a unique ID or we get lots of nice 2132 * system must have a unique ID or we get lots of nice
2137 * 'stuck on smp_invalidate_needed IPI wait' messages. 2133 * 'stuck on smp_invalidate_needed IPI wait' messages.
2138 */ 2134 */
2139 if (check_apicid_used(phys_id_present_map, 2135 if (apic->check_apicid_used(phys_id_present_map,
2140 mp_ioapics[apic].mp_apicid)) { 2136 mp_ioapics[apic_id].apicid)) {
2141 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", 2137 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
2142 apic, mp_ioapics[apic].mp_apicid); 2138 apic_id, mp_ioapics[apic_id].apicid);
2143 for (i = 0; i < get_physical_broadcast(); i++) 2139 for (i = 0; i < get_physical_broadcast(); i++)
2144 if (!physid_isset(i, phys_id_present_map)) 2140 if (!physid_isset(i, phys_id_present_map))
2145 break; 2141 break;
@@ -2148,13 +2144,13 @@ static void __init setup_ioapic_ids_from_mpc(void)
2148 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", 2144 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2149 i); 2145 i);
2150 physid_set(i, phys_id_present_map); 2146 physid_set(i, phys_id_present_map);
2151 mp_ioapics[apic].mp_apicid = i; 2147 mp_ioapics[apic_id].apicid = i;
2152 } else { 2148 } else {
2153 physid_mask_t tmp; 2149 physid_mask_t tmp;
2154 tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid); 2150 tmp = apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid);
2155 apic_printk(APIC_VERBOSE, "Setting %d in the " 2151 apic_printk(APIC_VERBOSE, "Setting %d in the "
2156 "phys_id_present_map\n", 2152 "phys_id_present_map\n",
2157 mp_ioapics[apic].mp_apicid); 2153 mp_ioapics[apic_id].apicid);
2158 physids_or(phys_id_present_map, phys_id_present_map, tmp); 2154 physids_or(phys_id_present_map, phys_id_present_map, tmp);
2159 } 2155 }
2160 2156
@@ -2163,11 +2159,11 @@ static void __init setup_ioapic_ids_from_mpc(void)
2163 * We need to adjust the IRQ routing table 2159 * We need to adjust the IRQ routing table
2164 * if the ID changed. 2160 * if the ID changed.
2165 */ 2161 */
2166 if (old_id != mp_ioapics[apic].mp_apicid) 2162 if (old_id != mp_ioapics[apic_id].apicid)
2167 for (i = 0; i < mp_irq_entries; i++) 2163 for (i = 0; i < mp_irq_entries; i++)
2168 if (mp_irqs[i].mp_dstapic == old_id) 2164 if (mp_irqs[i].dstapic == old_id)
2169 mp_irqs[i].mp_dstapic 2165 mp_irqs[i].dstapic
2170 = mp_ioapics[apic].mp_apicid; 2166 = mp_ioapics[apic_id].apicid;
2171 2167
2172 /* 2168 /*
2173 * Read the right value from the MPC table and 2169 * Read the right value from the MPC table and
@@ -2175,20 +2171,20 @@ static void __init setup_ioapic_ids_from_mpc(void)
2175 */ 2171 */
2176 apic_printk(APIC_VERBOSE, KERN_INFO 2172 apic_printk(APIC_VERBOSE, KERN_INFO
2177 "...changing IO-APIC physical APIC ID to %d ...", 2173 "...changing IO-APIC physical APIC ID to %d ...",
2178 mp_ioapics[apic].mp_apicid); 2174 mp_ioapics[apic_id].apicid);
2179 2175
2180 reg_00.bits.ID = mp_ioapics[apic].mp_apicid; 2176 reg_00.bits.ID = mp_ioapics[apic_id].apicid;
2181 spin_lock_irqsave(&ioapic_lock, flags); 2177 spin_lock_irqsave(&ioapic_lock, flags);
2182 io_apic_write(apic, 0, reg_00.raw); 2178 io_apic_write(apic_id, 0, reg_00.raw);
2183 spin_unlock_irqrestore(&ioapic_lock, flags); 2179 spin_unlock_irqrestore(&ioapic_lock, flags);
2184 2180
2185 /* 2181 /*
2186 * Sanity check 2182 * Sanity check
2187 */ 2183 */
2188 spin_lock_irqsave(&ioapic_lock, flags); 2184 spin_lock_irqsave(&ioapic_lock, flags);
2189 reg_00.raw = io_apic_read(apic, 0); 2185 reg_00.raw = io_apic_read(apic_id, 0);
2190 spin_unlock_irqrestore(&ioapic_lock, flags); 2186 spin_unlock_irqrestore(&ioapic_lock, flags);
2191 if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid) 2187 if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
2192 printk("could not set ID!\n"); 2188 printk("could not set ID!\n");
2193 else 2189 else
2194 apic_printk(APIC_VERBOSE, " ok.\n"); 2190 apic_printk(APIC_VERBOSE, " ok.\n");
@@ -2291,7 +2287,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
2291 unsigned long flags; 2287 unsigned long flags;
2292 2288
2293 spin_lock_irqsave(&vector_lock, flags); 2289 spin_lock_irqsave(&vector_lock, flags);
2294 send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); 2290 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
2295 spin_unlock_irqrestore(&vector_lock, flags); 2291 spin_unlock_irqrestore(&vector_lock, flags);
2296 2292
2297 return 1; 2293 return 1;
@@ -2299,7 +2295,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
2299#else 2295#else
2300static int ioapic_retrigger_irq(unsigned int irq) 2296static int ioapic_retrigger_irq(unsigned int irq)
2301{ 2297{
2302 send_IPI_self(irq_cfg(irq)->vector); 2298 apic->send_IPI_self(irq_cfg(irq)->vector);
2303 2299
2304 return 1; 2300 return 1;
2305} 2301}
@@ -2363,7 +2359,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2363 2359
2364 set_extra_move_desc(desc, mask); 2360 set_extra_move_desc(desc, mask);
2365 2361
2366 dest = cpu_mask_to_apicid_and(cfg->domain, mask); 2362 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
2367 2363
2368 modify_ioapic_rte = desc->status & IRQ_LEVEL; 2364 modify_ioapic_rte = desc->status & IRQ_LEVEL;
2369 if (modify_ioapic_rte) { 2365 if (modify_ioapic_rte) {
@@ -2383,7 +2379,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2383 if (cfg->move_in_progress) 2379 if (cfg->move_in_progress)
2384 send_cleanup_vector(cfg); 2380 send_cleanup_vector(cfg);
2385 2381
2386 cpumask_copy(&desc->affinity, mask); 2382 cpumask_copy(desc->affinity, mask);
2387} 2383}
2388 2384
2389static int migrate_irq_remapped_level_desc(struct irq_desc *desc) 2385static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2405,11 +2401,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
2405 } 2401 }
2406 2402
2407 /* everything is clear. we have right of way */ 2403 /* everything is clear. we have right of way */
2408 migrate_ioapic_irq_desc(desc, &desc->pending_mask); 2404 migrate_ioapic_irq_desc(desc, desc->pending_mask);
2409 2405
2410 ret = 0; 2406 ret = 0;
2411 desc->status &= ~IRQ_MOVE_PENDING; 2407 desc->status &= ~IRQ_MOVE_PENDING;
2412 cpumask_clear(&desc->pending_mask); 2408 cpumask_clear(desc->pending_mask);
2413 2409
2414unmask: 2410unmask:
2415 unmask_IO_APIC_irq_desc(desc); 2411 unmask_IO_APIC_irq_desc(desc);
@@ -2434,7 +2430,7 @@ static void ir_irq_migration(struct work_struct *work)
2434 continue; 2430 continue;
2435 } 2431 }
2436 2432
2437 desc->chip->set_affinity(irq, &desc->pending_mask); 2433 desc->chip->set_affinity(irq, desc->pending_mask);
2438 spin_unlock_irqrestore(&desc->lock, flags); 2434 spin_unlock_irqrestore(&desc->lock, flags);
2439 } 2435 }
2440 } 2436 }
@@ -2448,7 +2444,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
2448{ 2444{
2449 if (desc->status & IRQ_LEVEL) { 2445 if (desc->status & IRQ_LEVEL) {
2450 desc->status |= IRQ_MOVE_PENDING; 2446 desc->status |= IRQ_MOVE_PENDING;
2451 cpumask_copy(&desc->pending_mask, mask); 2447 cpumask_copy(desc->pending_mask, mask);
2452 migrate_irq_remapped_level_desc(desc); 2448 migrate_irq_remapped_level_desc(desc);
2453 return; 2449 return;
2454 } 2450 }
@@ -2516,7 +2512,7 @@ static void irq_complete_move(struct irq_desc **descp)
2516 2512
2517 /* domain has not changed, but affinity did */ 2513 /* domain has not changed, but affinity did */
2518 me = smp_processor_id(); 2514 me = smp_processor_id();
2519 if (cpu_isset(me, desc->affinity)) { 2515 if (cpumask_test_cpu(me, desc->affinity)) {
2520 *descp = desc = move_irq_desc(desc, me); 2516 *descp = desc = move_irq_desc(desc, me);
2521 /* get the new one */ 2517 /* get the new one */
2522 cfg = desc->chip_data; 2518 cfg = desc->chip_data;
@@ -2867,19 +2863,15 @@ static inline void __init check_timer(void)
2867 int cpu = boot_cpu_id; 2863 int cpu = boot_cpu_id;
2868 int apic1, pin1, apic2, pin2; 2864 int apic1, pin1, apic2, pin2;
2869 unsigned long flags; 2865 unsigned long flags;
2870 unsigned int ver;
2871 int no_pin1 = 0; 2866 int no_pin1 = 0;
2872 2867
2873 local_irq_save(flags); 2868 local_irq_save(flags);
2874 2869
2875 ver = apic_read(APIC_LVR);
2876 ver = GET_APIC_VERSION(ver);
2877
2878 /* 2870 /*
2879 * get/set the timer IRQ vector: 2871 * get/set the timer IRQ vector:
2880 */ 2872 */
2881 disable_8259A_irq(0); 2873 disable_8259A_irq(0);
2882 assign_irq_vector(0, cfg, TARGET_CPUS); 2874 assign_irq_vector(0, cfg, apic->target_cpus());
2883 2875
2884 /* 2876 /*
2885 * As IRQ0 is to be enabled in the 8259A, the virtual 2877 * As IRQ0 is to be enabled in the 8259A, the virtual
@@ -2893,7 +2885,13 @@ static inline void __init check_timer(void)
2893 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); 2885 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2894 init_8259A(1); 2886 init_8259A(1);
2895#ifdef CONFIG_X86_32 2887#ifdef CONFIG_X86_32
2896 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); 2888 {
2889 unsigned int ver;
2890
2891 ver = apic_read(APIC_LVR);
2892 ver = GET_APIC_VERSION(ver);
2893 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
2894 }
2897#endif 2895#endif
2898 2896
2899 pin1 = find_isa_irq_pin(0, mp_INT); 2897 pin1 = find_isa_irq_pin(0, mp_INT);
@@ -2932,8 +2930,17 @@ static inline void __init check_timer(void)
2932 if (no_pin1) { 2930 if (no_pin1) {
2933 add_pin_to_irq_cpu(cfg, cpu, apic1, pin1); 2931 add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
2934 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2932 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2933 } else {
2934 /* For an edge-triggered timer, setup_IO_APIC_irq()
2935 * already leaves it unmasked, so we only need to
2936 * unmask here if it is level-triggered. Do we really
2937 * have a level-triggered timer?
2938 */
2939 int idx;
2940 idx = find_irq_entry(apic1, pin1, mp_INT);
2941 if (idx != -1 && irq_trigger(idx))
2942 unmask_IO_APIC_irq_desc(desc);
2935 } 2943 }
2936 unmask_IO_APIC_irq_desc(desc);
2937 if (timer_irq_works()) { 2944 if (timer_irq_works()) {
2938 if (nmi_watchdog == NMI_IO_APIC) { 2945 if (nmi_watchdog == NMI_IO_APIC) {
2939 setup_nmi(); 2946 setup_nmi();
@@ -2947,6 +2954,7 @@ static inline void __init check_timer(void)
2947 if (intr_remapping_enabled) 2954 if (intr_remapping_enabled)
2948 panic("timer doesn't work through Interrupt-remapped IO-APIC"); 2955 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2949#endif 2956#endif
2957 local_irq_disable();
2950 clear_IO_APIC_pin(apic1, pin1); 2958 clear_IO_APIC_pin(apic1, pin1);
2951 if (!no_pin1) 2959 if (!no_pin1)
2952 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " 2960 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
@@ -2961,7 +2969,6 @@ static inline void __init check_timer(void)
2961 */ 2969 */
2962 replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2); 2970 replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
2963 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 2971 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2964 unmask_IO_APIC_irq_desc(desc);
2965 enable_8259A_irq(0); 2972 enable_8259A_irq(0);
2966 if (timer_irq_works()) { 2973 if (timer_irq_works()) {
2967 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 2974 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
@@ -2976,6 +2983,7 @@ static inline void __init check_timer(void)
2976 /* 2983 /*
2977 * Cleanup, just in case ... 2984 * Cleanup, just in case ...
2978 */ 2985 */
2986 local_irq_disable();
2979 disable_8259A_irq(0); 2987 disable_8259A_irq(0);
2980 clear_IO_APIC_pin(apic2, pin2); 2988 clear_IO_APIC_pin(apic2, pin2);
2981 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); 2989 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
@@ -3001,6 +3009,7 @@ static inline void __init check_timer(void)
3001 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 3009 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3002 goto out; 3010 goto out;
3003 } 3011 }
3012 local_irq_disable();
3004 disable_8259A_irq(0); 3013 disable_8259A_irq(0);
3005 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 3014 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
3006 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 3015 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
@@ -3018,6 +3027,7 @@ static inline void __init check_timer(void)
3018 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 3027 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3019 goto out; 3028 goto out;
3020 } 3029 }
3030 local_irq_disable();
3021 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); 3031 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
3022 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " 3032 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
3023 "report. Then try booting with the 'noapic' option.\n"); 3033 "report. Then try booting with the 'noapic' option.\n");
@@ -3118,8 +3128,8 @@ static int ioapic_resume(struct sys_device *dev)
3118 3128
3119 spin_lock_irqsave(&ioapic_lock, flags); 3129 spin_lock_irqsave(&ioapic_lock, flags);
3120 reg_00.raw = io_apic_read(dev->id, 0); 3130 reg_00.raw = io_apic_read(dev->id, 0);
3121 if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) { 3131 if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
3122 reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid; 3132 reg_00.bits.ID = mp_ioapics[dev->id].apicid;
3123 io_apic_write(dev->id, 0, reg_00.raw); 3133 io_apic_write(dev->id, 0, reg_00.raw);
3124 } 3134 }
3125 spin_unlock_irqrestore(&ioapic_lock, flags); 3135 spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -3169,6 +3179,7 @@ static int __init ioapic_init_sysfs(void)
3169 3179
3170device_initcall(ioapic_init_sysfs); 3180device_initcall(ioapic_init_sysfs);
3171 3181
3182static int nr_irqs_gsi = NR_IRQS_LEGACY;
3172/* 3183/*
3173 * Dynamic irq allocate and deallocation 3184 * Dynamic irq allocate and deallocation
3174 */ 3185 */
@@ -3183,11 +3194,11 @@ unsigned int create_irq_nr(unsigned int irq_want)
3183 struct irq_desc *desc_new = NULL; 3194 struct irq_desc *desc_new = NULL;
3184 3195
3185 irq = 0; 3196 irq = 0;
3186 spin_lock_irqsave(&vector_lock, flags); 3197 if (irq_want < nr_irqs_gsi)
3187 for (new = irq_want; new < NR_IRQS; new++) { 3198 irq_want = nr_irqs_gsi;
3188 if (platform_legacy_irq(new))
3189 continue;
3190 3199
3200 spin_lock_irqsave(&vector_lock, flags);
3201 for (new = irq_want; new < nr_irqs; new++) {
3191 desc_new = irq_to_desc_alloc_cpu(new, cpu); 3202 desc_new = irq_to_desc_alloc_cpu(new, cpu);
3192 if (!desc_new) { 3203 if (!desc_new) {
3193 printk(KERN_INFO "can not get irq_desc for %d\n", new); 3204 printk(KERN_INFO "can not get irq_desc for %d\n", new);
@@ -3197,7 +3208,7 @@ unsigned int create_irq_nr(unsigned int irq_want)
3197 3208
3198 if (cfg_new->vector != 0) 3209 if (cfg_new->vector != 0)
3199 continue; 3210 continue;
3200 if (__assign_irq_vector(new, cfg_new, TARGET_CPUS) == 0) 3211 if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
3201 irq = new; 3212 irq = new;
3202 break; 3213 break;
3203 } 3214 }
@@ -3212,7 +3223,6 @@ unsigned int create_irq_nr(unsigned int irq_want)
3212 return irq; 3223 return irq;
3213} 3224}
3214 3225
3215static int nr_irqs_gsi = NR_IRQS_LEGACY;
3216int create_irq(void) 3226int create_irq(void)
3217{ 3227{
3218 unsigned int irq_want; 3228 unsigned int irq_want;
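
create_irq_nr() now clamps its starting point to nr_irqs_gsi, so
dynamically created IRQs (MSI, HT) always land above the GSI range
instead of being filtered with platform_legacy_irq() one entry at a
time. The policy as a self-contained sketch (irq_in_use() is a
hypothetical stand-in for the vector and descriptor checks):

    static unsigned int nr_irqs_gsi = 16;   /* assumed value */
    static unsigned int nr_irqs     = 224;  /* assumed value */

    static int irq_in_use(unsigned int irq) { (void)irq; return 0; }

    static unsigned int create_irq_sketch(unsigned int irq_want)
    {
            unsigned int irq;

            if (irq_want < nr_irqs_gsi)     /* never hand out a GSI */
                    irq_want = nr_irqs_gsi;

            for (irq = irq_want; irq < nr_irqs; irq++)
                    if (!irq_in_use(irq))
                            return irq;
            return 0;                       /* 0 doubles as failure */
    }
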
@@ -3259,12 +3269,15 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3259 int err; 3269 int err;
3260 unsigned dest; 3270 unsigned dest;
3261 3271
3272 if (disable_apic)
3273 return -ENXIO;
3274
3262 cfg = irq_cfg(irq); 3275 cfg = irq_cfg(irq);
3263 err = assign_irq_vector(irq, cfg, TARGET_CPUS); 3276 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3264 if (err) 3277 if (err)
3265 return err; 3278 return err;
3266 3279
3267 dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); 3280 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
3268 3281
3269#ifdef CONFIG_INTR_REMAP 3282#ifdef CONFIG_INTR_REMAP
3270 if (irq_remapped(irq)) { 3283 if (irq_remapped(irq)) {
@@ -3278,9 +3291,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3278 memset (&irte, 0, sizeof(irte)); 3291 memset (&irte, 0, sizeof(irte));
3279 3292
3280 irte.present = 1; 3293 irte.present = 1;
3281 irte.dst_mode = INT_DEST_MODE; 3294 irte.dst_mode = apic->irq_dest_mode;
3282 irte.trigger_mode = 0; /* edge */ 3295 irte.trigger_mode = 0; /* edge */
3283 irte.dlvry_mode = INT_DELIVERY_MODE; 3296 irte.dlvry_mode = apic->irq_delivery_mode;
3284 irte.vector = cfg->vector; 3297 irte.vector = cfg->vector;
3285 irte.dest_id = IRTE_DEST(dest); 3298 irte.dest_id = IRTE_DEST(dest);
3286 3299
@@ -3298,10 +3311,10 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3298 msg->address_hi = MSI_ADDR_BASE_HI; 3311 msg->address_hi = MSI_ADDR_BASE_HI;
3299 msg->address_lo = 3312 msg->address_lo =
3300 MSI_ADDR_BASE_LO | 3313 MSI_ADDR_BASE_LO |
3301 ((INT_DEST_MODE == 0) ? 3314 ((apic->irq_dest_mode == 0) ?
3302 MSI_ADDR_DEST_MODE_PHYSICAL: 3315 MSI_ADDR_DEST_MODE_PHYSICAL:
3303 MSI_ADDR_DEST_MODE_LOGICAL) | 3316 MSI_ADDR_DEST_MODE_LOGICAL) |
3304 ((INT_DELIVERY_MODE != dest_LowestPrio) ? 3317 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3305 MSI_ADDR_REDIRECTION_CPU: 3318 MSI_ADDR_REDIRECTION_CPU:
3306 MSI_ADDR_REDIRECTION_LOWPRI) | 3319 MSI_ADDR_REDIRECTION_LOWPRI) |
3307 MSI_ADDR_DEST_ID(dest); 3320 MSI_ADDR_DEST_ID(dest);
@@ -3309,7 +3322,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3309 msg->data = 3322 msg->data =
3310 MSI_DATA_TRIGGER_EDGE | 3323 MSI_DATA_TRIGGER_EDGE |
3311 MSI_DATA_LEVEL_ASSERT | 3324 MSI_DATA_LEVEL_ASSERT |
3312 ((INT_DELIVERY_MODE != dest_LowestPrio) ? 3325 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3313 MSI_DATA_DELIVERY_FIXED: 3326 MSI_DATA_DELIVERY_FIXED:
3314 MSI_DATA_DELIVERY_LOWPRI) | 3327 MSI_DATA_DELIVERY_LOWPRI) |
3315 MSI_DATA_VECTOR(cfg->vector); 3328 MSI_DATA_VECTOR(cfg->vector);
@@ -3464,40 +3477,6 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3464 return 0; 3477 return 0;
3465} 3478}
3466 3479
3467int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
3468{
3469 unsigned int irq;
3470 int ret;
3471 unsigned int irq_want;
3472
3473 irq_want = nr_irqs_gsi;
3474 irq = create_irq_nr(irq_want);
3475 if (irq == 0)
3476 return -1;
3477
3478#ifdef CONFIG_INTR_REMAP
3479 if (!intr_remapping_enabled)
3480 goto no_ir;
3481
3482 ret = msi_alloc_irte(dev, irq, 1);
3483 if (ret < 0)
3484 goto error;
3485no_ir:
3486#endif
3487 ret = setup_msi_irq(dev, msidesc, irq);
3488 if (ret < 0) {
3489 destroy_irq(irq);
3490 return ret;
3491 }
3492 return 0;
3493
3494#ifdef CONFIG_INTR_REMAP
3495error:
3496 destroy_irq(irq);
3497 return ret;
3498#endif
3499}
3500
3501int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3480int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3502{ 3481{
3503 unsigned int irq; 3482 unsigned int irq;
@@ -3514,9 +3493,9 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3514 sub_handle = 0; 3493 sub_handle = 0;
3515 list_for_each_entry(msidesc, &dev->msi_list, list) { 3494 list_for_each_entry(msidesc, &dev->msi_list, list) {
3516 irq = create_irq_nr(irq_want); 3495 irq = create_irq_nr(irq_want);
3517 irq_want++;
3518 if (irq == 0) 3496 if (irq == 0)
3519 return -1; 3497 return -1;
3498 irq_want = irq + 1;
3520#ifdef CONFIG_INTR_REMAP 3499#ifdef CONFIG_INTR_REMAP
3521 if (!intr_remapping_enabled) 3500 if (!intr_remapping_enabled)
3522 goto no_ir; 3501 goto no_ir;
@@ -3727,13 +3706,17 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3727 struct irq_cfg *cfg; 3706 struct irq_cfg *cfg;
3728 int err; 3707 int err;
3729 3708
3709 if (disable_apic)
3710 return -ENXIO;
3711
3730 cfg = irq_cfg(irq); 3712 cfg = irq_cfg(irq);
3731 err = assign_irq_vector(irq, cfg, TARGET_CPUS); 3713 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3732 if (!err) { 3714 if (!err) {
3733 struct ht_irq_msg msg; 3715 struct ht_irq_msg msg;
3734 unsigned dest; 3716 unsigned dest;
3735 3717
3736 dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); 3718 dest = apic->cpu_mask_to_apicid_and(cfg->domain,
3719 apic->target_cpus());
3737 3720
3738 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); 3721 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3739 3722
@@ -3741,11 +3724,11 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3741 HT_IRQ_LOW_BASE | 3724 HT_IRQ_LOW_BASE |
3742 HT_IRQ_LOW_DEST_ID(dest) | 3725 HT_IRQ_LOW_DEST_ID(dest) |
3743 HT_IRQ_LOW_VECTOR(cfg->vector) | 3726 HT_IRQ_LOW_VECTOR(cfg->vector) |
3744 ((INT_DEST_MODE == 0) ? 3727 ((apic->irq_dest_mode == 0) ?
3745 HT_IRQ_LOW_DM_PHYSICAL : 3728 HT_IRQ_LOW_DM_PHYSICAL :
3746 HT_IRQ_LOW_DM_LOGICAL) | 3729 HT_IRQ_LOW_DM_LOGICAL) |
3747 HT_IRQ_LOW_RQEOI_EDGE | 3730 HT_IRQ_LOW_RQEOI_EDGE |
3748 ((INT_DELIVERY_MODE != dest_LowestPrio) ? 3731 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3749 HT_IRQ_LOW_MT_FIXED : 3732 HT_IRQ_LOW_MT_FIXED :
3750 HT_IRQ_LOW_MT_ARBITRATED) | 3733 HT_IRQ_LOW_MT_ARBITRATED) |
3751 HT_IRQ_LOW_IRQ_MASKED; 3734 HT_IRQ_LOW_IRQ_MASKED;
@@ -3761,7 +3744,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3761} 3744}
3762#endif /* CONFIG_HT_IRQ */ 3745#endif /* CONFIG_HT_IRQ */
3763 3746
3764#ifdef CONFIG_X86_64 3747#ifdef CONFIG_X86_UV
3765/* 3748/*
3766 * Re-target the irq to the specified CPU and enable the specified MMR located 3749 * Re-target the irq to the specified CPU and enable the specified MMR located
3767 * on the specified blade to allow the sending of MSIs to the specified CPU. 3750 * on the specified blade to allow the sending of MSIs to the specified CPU.
@@ -3793,12 +3776,12 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3793 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); 3776 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
3794 3777
3795 entry->vector = cfg->vector; 3778 entry->vector = cfg->vector;
3796 entry->delivery_mode = INT_DELIVERY_MODE; 3779 entry->delivery_mode = apic->irq_delivery_mode;
3797 entry->dest_mode = INT_DEST_MODE; 3780 entry->dest_mode = apic->irq_dest_mode;
3798 entry->polarity = 0; 3781 entry->polarity = 0;
3799 entry->trigger = 0; 3782 entry->trigger = 0;
3800 entry->mask = 0; 3783 entry->mask = 0;
3801 entry->dest = cpu_mask_to_apicid(eligible_cpu); 3784 entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);
3802 3785
3803 mmr_pnode = uv_blade_to_pnode(mmr_blade); 3786 mmr_pnode = uv_blade_to_pnode(mmr_blade);
3804 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); 3787 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -3861,6 +3844,28 @@ void __init probe_nr_irqs_gsi(void)
3861 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); 3844 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
3862} 3845}
3863 3846
3847#ifdef CONFIG_SPARSE_IRQ
3848int __init arch_probe_nr_irqs(void)
3849{
3850 int nr;
3851
3852 if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
3853 nr_irqs = NR_VECTORS * nr_cpu_ids;
3854
3855 nr = nr_irqs_gsi + 8 * nr_cpu_ids;
3856#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
3857 /*
3858 * for MSI and HT dyn irq
3859 */
3860 nr += nr_irqs_gsi * 16;
3861#endif
3862 if (nr < nr_irqs)
3863 nr_irqs = nr;
3864
3865 return 0;
3866}
3867#endif
3868
3864/* -------------------------------------------------------------------------- 3869/* --------------------------------------------------------------------------
3865 ACPI-based IOAPIC Configuration 3870 ACPI-based IOAPIC Configuration
3866 -------------------------------------------------------------------------- */ 3871 -------------------------------------------------------------------------- */
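
The new arch_probe_nr_irqs() bounds the sparse-irq table instead of
leaving it at the worst-case default. With assumed inputs of 24 GSIs
and 8 possible cpus the estimate is 24 + 8*8 + 24*16 = 472 descriptors.
The same arithmetic as a standalone function (illustrative; NR_VECTORS
is 256 on x86):

    /* Example: gsi=24, cpus=8 -> min(cap, 24 + 64 + 384) = 472. */
    static int probe_nr_irqs_sketch(int gsi, int cpus, int nr_irqs_in)
    {
            const int nr_vectors = 256;         /* x86 IDT size */
            int cap = nr_vectors * cpus;
            int nr;

            if (nr_irqs_in > cap)
                    nr_irqs_in = cap;

            nr  = gsi + 8 * cpus;               /* GSIs + per-cpu slack  */
            nr += gsi * 16;                     /* MSI / HT dynamic irqs */

            return nr < nr_irqs_in ? nr : nr_irqs_in;
    }
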
@@ -3886,7 +3891,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
3886 */ 3891 */
3887 3892
3888 if (physids_empty(apic_id_map)) 3893 if (physids_empty(apic_id_map))
3889 apic_id_map = ioapic_phys_id_map(phys_cpu_present_map); 3894 apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
3890 3895
3891 spin_lock_irqsave(&ioapic_lock, flags); 3896 spin_lock_irqsave(&ioapic_lock, flags);
3892 reg_00.raw = io_apic_read(ioapic, 0); 3897 reg_00.raw = io_apic_read(ioapic, 0);
@@ -3902,10 +3907,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
3902 * Every APIC in a system must have a unique ID or we get lots of nice 3907 * Every APIC in a system must have a unique ID or we get lots of nice
3903 * 'stuck on smp_invalidate_needed IPI wait' messages. 3908 * 'stuck on smp_invalidate_needed IPI wait' messages.
3904 */ 3909 */
3905 if (check_apicid_used(apic_id_map, apic_id)) { 3910 if (apic->check_apicid_used(apic_id_map, apic_id)) {
3906 3911
3907 for (i = 0; i < get_physical_broadcast(); i++) { 3912 for (i = 0; i < get_physical_broadcast(); i++) {
3908 if (!check_apicid_used(apic_id_map, i)) 3913 if (!apic->check_apicid_used(apic_id_map, i))
3909 break; 3914 break;
3910 } 3915 }
3911 3916
@@ -3918,7 +3923,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
3918 apic_id = i; 3923 apic_id = i;
3919 } 3924 }
3920 3925
3921 tmp = apicid_to_cpu_present(apic_id); 3926 tmp = apic->apicid_to_cpu_present(apic_id);
3922 physids_or(apic_id_map, apic_id_map, tmp); 3927 physids_or(apic_id_map, apic_id_map, tmp);
3923 3928
3924 if (reg_00.bits.ID != apic_id) { 3929 if (reg_00.bits.ID != apic_id) {
@@ -3995,8 +4000,8 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
3995 return -1; 4000 return -1;
3996 4001
3997 for (i = 0; i < mp_irq_entries; i++) 4002 for (i = 0; i < mp_irq_entries; i++)
3998 if (mp_irqs[i].mp_irqtype == mp_INT && 4003 if (mp_irqs[i].irqtype == mp_INT &&
3999 mp_irqs[i].mp_srcbusirq == bus_irq) 4004 mp_irqs[i].srcbusirq == bus_irq)
4000 break; 4005 break;
4001 if (i >= mp_irq_entries) 4006 if (i >= mp_irq_entries)
4002 return -1; 4007 return -1;
@@ -4011,7 +4016,7 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
4011/* 4016/*
4012 * This function currently is only a helper for the i386 smp boot process where 4017 * This function currently is only a helper for the i386 smp boot process where
4013 * we need to reprogram the ioredtbls to cater for the cpus which have come online 4018 * we need to reprogram the ioredtbls to cater for the cpus which have come online
4014 * so mask in all cases should simply be TARGET_CPUS 4019 * so mask in all cases should simply be apic->target_cpus()
4015 */ 4020 */
4016#ifdef CONFIG_SMP 4021#ifdef CONFIG_SMP
4017void __init setup_ioapic_dest(void) 4022void __init setup_ioapic_dest(void)
@@ -4050,9 +4055,9 @@ void __init setup_ioapic_dest(void)
4050 */ 4055 */
4051 if (desc->status & 4056 if (desc->status &
4052 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) 4057 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
4053 mask = &desc->affinity; 4058 mask = desc->affinity;
4054 else 4059 else
4055 mask = TARGET_CPUS; 4060 mask = apic->target_cpus();
4056 4061
4057#ifdef CONFIG_INTR_REMAP 4062#ifdef CONFIG_INTR_REMAP
4058 if (intr_remapping_enabled) 4063 if (intr_remapping_enabled)
@@ -4111,7 +4116,7 @@ void __init ioapic_init_mappings(void)
4111 ioapic_res = ioapic_setup_resources(); 4116 ioapic_res = ioapic_setup_resources();
4112 for (i = 0; i < nr_ioapics; i++) { 4117 for (i = 0; i < nr_ioapics; i++) {
4113 if (smp_found_config) { 4118 if (smp_found_config) {
4114 ioapic_phys = mp_ioapics[i].mp_apicaddr; 4119 ioapic_phys = mp_ioapics[i].apicaddr;
4115#ifdef CONFIG_X86_32 4120#ifdef CONFIG_X86_32
4116 if (!ioapic_phys) { 4121 if (!ioapic_phys) {
4117 printk(KERN_ERR 4122 printk(KERN_ERR
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index b12208f4dfee..e41980a373ab 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -131,9 +131,8 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
131} 131}
132 132
133#ifdef CONFIG_X86_32 133#ifdef CONFIG_X86_32
134asmlinkage long sys_iopl(unsigned long regsp) 134long sys_iopl(struct pt_regs *regs)
135{ 135{
136 struct pt_regs *regs = (struct pt_regs *)&regsp;
137 unsigned int level = regs->bx; 136 unsigned int level = regs->bx;
138 struct thread_struct *t = &current->thread; 137 struct thread_struct *t = &current->thread;
139 int rc; 138 int rc;
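
The 32-bit sys_iopl() cleanup deserves a note: the deleted cast worked
only because, under the old asmlinkage convention, the first stack
argument happened to sit at the top of the saved register frame, so
&regsp aliased the pt_regs. The entry stub now passes the frame pointer
explicitly and the C code stops depending on stack layout. A contrast
sketch with a stand-in pt_regs (do not imitate the "before" form):

    struct pt_regs_sketch { unsigned long bx; /* ... */ };

    /* before: reinterpret the argument's own address as the frame,
     * correct only by accident of the calling convention */
    long iopl_before(unsigned long regsp)
    {
            struct pt_regs_sketch *regs = (struct pt_regs_sketch *)&regsp;
            return (long)regs->bx;
    }

    /* after: the asm entry stub passes the frame address itself */
    long iopl_after(struct pt_regs_sketch *regs)
    {
            return (long)regs->bx;
    }
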
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
index 285bbf8831fa..dbf5445727a9 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/ipi.c
@@ -17,147 +17,121 @@
17#include <asm/mmu_context.h> 17#include <asm/mmu_context.h>
18#include <asm/apic.h> 18#include <asm/apic.h>
19#include <asm/proto.h> 19#include <asm/proto.h>
20#include <asm/ipi.h>
20 21
21#ifdef CONFIG_X86_32 22void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
22#include <mach_apic.h>
23#include <mach_ipi.h>
24
25/*
26 * the following functions deal with sending IPIs between CPUs.
27 *
28 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
29 */
30
31static inline int __prepare_ICR(unsigned int shortcut, int vector)
32{ 23{
33 unsigned int icr = shortcut | APIC_DEST_LOGICAL; 24 unsigned long query_cpu;
34 25 unsigned long flags;
35 switch (vector) { 26
36 default: 27 /*
37 icr |= APIC_DM_FIXED | vector; 28 * Hack. The clustered APIC addressing mode doesn't allow us to send
38 break; 29 * to an arbitrary mask, so I do a unicast to each CPU instead.
39 case NMI_VECTOR: 30 * - mbligh
40 icr |= APIC_DM_NMI; 31 */
41 break; 32 local_irq_save(flags);
33 for_each_cpu(query_cpu, mask) {
34 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
35 query_cpu), vector, APIC_DEST_PHYSICAL);
42 } 36 }
43 return icr; 37 local_irq_restore(flags);
44} 38}
45 39
46static inline int __prepare_ICR2(unsigned int mask) 40void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
41 int vector)
47{ 42{
48 return SET_APIC_DEST_FIELD(mask); 43 unsigned int this_cpu = smp_processor_id();
49} 44 unsigned int query_cpu;
45 unsigned long flags;
50 46
51void __send_IPI_shortcut(unsigned int shortcut, int vector) 47 /* See Hack comment above */
52{
53 /*
54 * Subtle. In the case of the 'never do double writes' workaround
55 * we have to lock out interrupts to be safe. As we don't care
56 * of the value read we use an atomic rmw access to avoid costly
57 * cli/sti. Otherwise we use an even cheaper single atomic write
58 * to the APIC.
59 */
60 unsigned int cfg;
61 48
62 /* 49 local_irq_save(flags);
63 * Wait for idle. 50 for_each_cpu(query_cpu, mask) {
64 */ 51 if (query_cpu == this_cpu)
65 apic_wait_icr_idle(); 52 continue;
53 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
54 query_cpu), vector, APIC_DEST_PHYSICAL);
55 }
56 local_irq_restore(flags);
57}
66 58
67 /* 59void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
68 * No need to touch the target chip field 60 int vector)
69 */ 61{
70 cfg = __prepare_ICR(shortcut, vector); 62 unsigned long flags;
63 unsigned int query_cpu;
71 64
72 /* 65 /*
73 * Send the IPI. The write to APIC_ICR fires this off. 66 * Hack. The clustered APIC addressing mode doesn't allow us to send
67 * to an arbitrary mask, so I do unicasts to each CPU instead. This
68 * should be modified to do 1 message per cluster ID - mbligh
74 */ 69 */
75 apic_write(APIC_ICR, cfg);
76}
77 70
78void send_IPI_self(int vector) 71 local_irq_save(flags);
79{ 72 for_each_cpu(query_cpu, mask)
80 __send_IPI_shortcut(APIC_DEST_SELF, vector); 73 __default_send_IPI_dest_field(
74 apic->cpu_to_logical_apicid(query_cpu), vector,
75 apic->dest_logical);
76 local_irq_restore(flags);
81} 77}
82 78
83/* 79void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
84 * This is used to send an IPI with no shorthand notation (the destination is 80 int vector)
85 * specified in bits 56 to 63 of the ICR).
86 */
87static inline void __send_IPI_dest_field(unsigned long mask, int vector)
88{ 81{
89 unsigned long cfg; 82 unsigned long flags;
90 83 unsigned int query_cpu;
91 /* 84 unsigned int this_cpu = smp_processor_id();
92 * Wait for idle.
93 */
94 if (unlikely(vector == NMI_VECTOR))
95 safe_apic_wait_icr_idle();
96 else
97 apic_wait_icr_idle();
98
99 /*
100 * prepare target chip field
101 */
102 cfg = __prepare_ICR2(mask);
103 apic_write(APIC_ICR2, cfg);
104 85
105 /* 86 /* See Hack comment above */
106 * program the ICR
107 */
108 cfg = __prepare_ICR(0, vector);
109 87
110 /* 88 local_irq_save(flags);
111 * Send the IPI. The write to APIC_ICR fires this off. 89 for_each_cpu(query_cpu, mask) {
112 */ 90 if (query_cpu == this_cpu)
113 apic_write(APIC_ICR, cfg); 91 continue;
92 __default_send_IPI_dest_field(
93 apic->cpu_to_logical_apicid(query_cpu), vector,
94 apic->dest_logical);
95 }
96 local_irq_restore(flags);
114} 97}
115 98
99#ifdef CONFIG_X86_32
100
116/* 101/*
117 * This is only used on smaller machines. 102 * This is only used on smaller machines.
118 */ 103 */
119void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector) 104void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
120{ 105{
121 unsigned long mask = cpumask_bits(cpumask)[0]; 106 unsigned long mask = cpumask_bits(cpumask)[0];
122 unsigned long flags; 107 unsigned long flags;
123 108
124 local_irq_save(flags); 109 local_irq_save(flags);
125 WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); 110 WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
126 __send_IPI_dest_field(mask, vector); 111 __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
127 local_irq_restore(flags); 112 local_irq_restore(flags);
128} 113}
129 114
130void send_IPI_mask_sequence(const struct cpumask *mask, int vector) 115void default_send_IPI_allbutself(int vector)
131{ 116{
132 unsigned long flags;
133 unsigned int query_cpu;
134
135 /* 117 /*
136 * Hack. The clustered APIC addressing mode doesn't allow us to send 118 * if there are no other CPUs in the system then we get an APIC send
137 * to an arbitrary mask, so I do a unicasts to each CPU instead. This 119 * error if we try to broadcast, thus avoid sending IPIs in this case.
138 * should be modified to do 1 message per cluster ID - mbligh
139 */ 120 */
121 if (!(num_online_cpus() > 1))
122 return;
140 123
141 local_irq_save(flags); 124 __default_local_send_IPI_allbutself(vector);
142 for_each_cpu(query_cpu, mask)
143 __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
144 local_irq_restore(flags);
145} 125}
146 126
147void send_IPI_mask_allbutself(const struct cpumask *mask, int vector) 127void default_send_IPI_all(int vector)
148{ 128{
149 unsigned long flags; 129 __default_local_send_IPI_all(vector);
150 unsigned int query_cpu; 130}
151 unsigned int this_cpu = smp_processor_id();
152
153 /* See Hack comment above */
154 131
155 local_irq_save(flags); 132void default_send_IPI_self(int vector)
156 for_each_cpu(query_cpu, mask) 133{
157 if (query_cpu != this_cpu) 134 __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
158 __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
159 vector);
160 local_irq_restore(flags);
161} 135}
162 136
163/* must come after the send_IPI functions above for inlining */ 137/* must come after the send_IPI functions above for inlining */
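The 32-bit __send_IPI_dest_field() deleted above lives on as __default_send_IPI_dest_field() with the destination mode passed explicitly, which is what lets one unicast loop serve both the physical and logical variants. A sketch reconstructed from the deleted code plus the new dest_mode parameter; the real inline is expected to live in asm/ipi.h in this series, so treat the body as illustrative:

	static inline void
	__default_send_IPI_dest_field(unsigned int dest, int vector,
				      unsigned int dest_mode)
	{
		unsigned long cfg;

		/* Wait for a previous IPI to clear; NMIs use the safe wait. */
		if (unlikely(vector == NMI_VECTOR))
			safe_apic_wait_icr_idle();
		else
			apic_wait_icr_idle();

		/* Target APIC ID goes into ICR2 (bits 56-63 of the ICR). */
		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(dest));

		/* Delivery mode + vector; this write fires the IPI. */
		cfg = APIC_DM_FIXED | dest_mode | vector;
		apic_write(APIC_ICR, cfg);
	}

(An NMI vector would use APIC_DM_NMI instead of APIC_DM_FIXED, exactly as in the removed __prepare_ICR().)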
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 3973e2df7f87..f13ca1650aaf 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -6,10 +6,12 @@
6#include <linux/kernel_stat.h> 6#include <linux/kernel_stat.h>
7#include <linux/seq_file.h> 7#include <linux/seq_file.h>
8#include <linux/smp.h> 8#include <linux/smp.h>
9#include <linux/ftrace.h>
9 10
10#include <asm/apic.h> 11#include <asm/apic.h>
11#include <asm/io_apic.h> 12#include <asm/io_apic.h>
12#include <asm/irq.h> 13#include <asm/irq.h>
14#include <asm/idle.h>
13 15
14atomic_t irq_err_count; 16atomic_t irq_err_count;
15 17
@@ -36,11 +38,7 @@ void ack_bad_irq(unsigned int irq)
36#endif 38#endif
37} 39}
38 40
39#ifdef CONFIG_X86_32 41#define irq_stats(x) (&per_cpu(irq_stat, x))
40# define irq_stats(x) (&per_cpu(irq_stat, x))
41#else
42# define irq_stats(x) cpu_pda(x)
43#endif
44/* 42/*
45 * /proc/interrupts printing: 43 * /proc/interrupts printing:
46 */ 44 */
@@ -192,4 +190,40 @@ u64 arch_irq_stat(void)
192 return sum; 190 return sum;
193} 191}
194 192
193
194/*
195 * do_IRQ handles all normal device IRQs (the special
196 * SMP cross-CPU interrupts have their own specific
197 * handlers).
198 */
199unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
200{
201 struct pt_regs *old_regs = set_irq_regs(regs);
202
203 /* high bit used in ret_from_ code */
204 unsigned vector = ~regs->orig_ax;
205 unsigned irq;
206
207 exit_idle();
208 irq_enter();
209
210 irq = __get_cpu_var(vector_irq)[vector];
211
212 if (!handle_irq(irq, regs)) {
213#ifdef CONFIG_X86_64
214 if (!disable_apic)
215 ack_APIC_irq();
216#endif
217
218 if (printk_ratelimit())
219 printk(KERN_EMERG "%s: %d.%d No irq handler for vector (irq %d)\n",
220 __func__, smp_processor_id(), vector, irq);
221 }
222
223 irq_exit();
224
225 set_irq_regs(old_regs);
226 return 1;
227}
228
195EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); 229EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
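The unified do_IRQ() above keeps the vector decode and the irq_enter()/irq_exit() bracketing in one place and delegates the rest to a per-width hook. The contract it relies on, in sketch form (the prototype is assumed to be declared in asm/irq.h):

	/* Implemented separately by irq_32.c and irq_64.c below; returns
	 * false when the vector has no descriptor, in which case do_IRQ()
	 * acks the APIC (64-bit) and rate-limit logs the stray vector. */
	bool handle_irq(unsigned irq, struct pt_regs *regs);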
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 74b9ff7341e9..4beb9a13873d 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -191,33 +191,16 @@ static inline int
191execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; } 191execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
192#endif 192#endif
193 193
194/* 194bool handle_irq(unsigned irq, struct pt_regs *regs)
195 * do_IRQ handles all normal device IRQs (the special
196 * SMP cross-CPU interrupts have their own specific
197 * handlers).
198 */
199unsigned int do_IRQ(struct pt_regs *regs)
200{ 195{
201 struct pt_regs *old_regs;
202 /* high bit used in ret_from_ code */
203 int overflow;
204 unsigned vector = ~regs->orig_ax;
205 struct irq_desc *desc; 196 struct irq_desc *desc;
206 unsigned irq; 197 int overflow;
207
208
209 old_regs = set_irq_regs(regs);
210 irq_enter();
211 irq = __get_cpu_var(vector_irq)[vector];
212 198
213 overflow = check_stack_overflow(); 199 overflow = check_stack_overflow();
214 200
215 desc = irq_to_desc(irq); 201 desc = irq_to_desc(irq);
216 if (unlikely(!desc)) { 202 if (unlikely(!desc))
217 printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n", 203 return false;
218 __func__, irq, vector, smp_processor_id());
219 BUG();
220 }
221 204
222 if (!execute_on_irq_stack(overflow, desc, irq)) { 205 if (!execute_on_irq_stack(overflow, desc, irq)) {
223 if (unlikely(overflow)) 206 if (unlikely(overflow))
@@ -225,13 +208,11 @@ unsigned int do_IRQ(struct pt_regs *regs)
225 desc->handle_irq(irq, desc); 208 desc->handle_irq(irq, desc);
226 } 209 }
227 210
228 irq_exit(); 211 return true;
229 set_irq_regs(old_regs);
230 return 1;
231} 212}
232 213
233#ifdef CONFIG_HOTPLUG_CPU 214#ifdef CONFIG_HOTPLUG_CPU
234#include <mach_apic.h> 215#include <asm/genapic.h>
235 216
236/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ 217/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
237void fixup_irqs(void) 218void fixup_irqs(void)
@@ -248,7 +229,7 @@ void fixup_irqs(void)
248 if (irq == 2) 229 if (irq == 2)
249 continue; 230 continue;
250 231
251 affinity = &desc->affinity; 232 affinity = desc->affinity;
252 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { 233 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
253 printk("Breaking affinity for irq %i\n", irq); 234 printk("Breaking affinity for irq %i\n", irq);
254 affinity = cpu_all_mask; 235 affinity = cpu_all_mask;
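handle_irq() on 32-bit still funnels through the IRQ-stack switch; the overflow flag it computes comes from a helper the hunk references but does not show. A sketch of that check under CONFIG_DEBUG_STACKOVERFLOW, reconstructed from kernels of this era (an assumption, not part of the patch): it asks whether %esp has sunk to within STACK_WARN bytes of the thread_info at the base of the stack:

	static int check_stack_overflow(void)
	{
		long sp;

		/* Low bits of %esp = offset into the THREAD_SIZE stack. */
		__asm__ __volatile__("andl %%esp,%0" :
				     "=r" (sp) : "0" (THREAD_SIZE - 1));

		return sp < (sizeof(struct thread_info) + STACK_WARN);
	}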
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 63c88e6ec025..977d8b43a0dd 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -18,6 +18,13 @@
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <asm/io_apic.h> 19#include <asm/io_apic.h>
20#include <asm/idle.h> 20#include <asm/idle.h>
21#include <asm/apic.h>
22
23DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
24EXPORT_PER_CPU_SYMBOL(irq_stat);
25
26DEFINE_PER_CPU(struct pt_regs *, irq_regs);
27EXPORT_PER_CPU_SYMBOL(irq_regs);
21 28
22/* 29/*
23 * Probabilistic stack overflow check: 30 * Probabilistic stack overflow check:
@@ -41,42 +48,18 @@ static inline void stack_overflow_check(struct pt_regs *regs)
41#endif 48#endif
42} 49}
43 50
44/* 51bool handle_irq(unsigned irq, struct pt_regs *regs)
45 * do_IRQ handles all normal device IRQs (the special
46 * SMP cross-CPU interrupts have their own specific
47 * handlers).
48 */
49asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
50{ 52{
51 struct pt_regs *old_regs = set_irq_regs(regs);
52 struct irq_desc *desc; 53 struct irq_desc *desc;
53 54
54 /* high bit used in ret_from_ code */
55 unsigned vector = ~regs->orig_ax;
56 unsigned irq;
57
58 exit_idle();
59 irq_enter();
60 irq = __get_cpu_var(vector_irq)[vector];
61
62 stack_overflow_check(regs); 55 stack_overflow_check(regs);
63 56
64 desc = irq_to_desc(irq); 57 desc = irq_to_desc(irq);
65 if (likely(desc)) 58 if (unlikely(!desc))
66 generic_handle_irq_desc(irq, desc); 59 return false;
67 else {
68 if (!disable_apic)
69 ack_APIC_irq();
70
71 if (printk_ratelimit())
72 printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
73 __func__, smp_processor_id(), vector);
74 }
75
76 irq_exit();
77 60
78 set_irq_regs(old_regs); 61 generic_handle_irq_desc(irq, desc);
79 return 1; 62 return true;
80} 63}
81 64
82#ifdef CONFIG_HOTPLUG_CPU 65#ifdef CONFIG_HOTPLUG_CPU
@@ -100,7 +83,7 @@ void fixup_irqs(void)
100 /* interrupts are disabled at this point */ 83 /* interrupts are disabled at this point */
101 spin_lock(&desc->lock); 84 spin_lock(&desc->lock);
102 85
103 affinity = &desc->affinity; 86 affinity = desc->affinity;
104 if (!irq_has_action(irq) || 87 if (!irq_has_action(irq) ||
105 cpumask_equal(affinity, cpu_online_mask)) { 88 cpumask_equal(affinity, cpu_online_mask)) {
106 spin_unlock(&desc->lock); 89 spin_unlock(&desc->lock);
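The 64-bit handle_irq() calls stack_overflow_check(), which the hunk's context elides. A hedged sketch of that probabilistic check as it exists around this kernel version (names assumed): it only fires if the interrupt happens to arrive while %rsp is already near the base of the task stack, hence "probabilistic":

	static inline void stack_overflow_check(struct pt_regs *regs)
	{
	#ifdef CONFIG_DEBUG_STACKOVERFLOW
		u64 curbase = (u64)task_stack_page(current);

		WARN_ONCE(regs->sp >= curbase &&
			  regs->sp <= curbase + sizeof(struct thread_info) + 128,
			  "do_IRQ: %s near stack overflow (cur:%Lx, sp:%lx)\n",
			  current->comm, curbase, regs->sp);
	#endif
	}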
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 10a09c2f1828..bf629cadec1a 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -78,6 +78,15 @@ void __init init_ISA_irqs(void)
78 } 78 }
79} 79}
80 80
81/*
82 * IRQ2 is cascade interrupt to second interrupt controller
83 */
84static struct irqaction irq2 = {
85 .handler = no_action,
86 .mask = CPU_MASK_NONE,
87 .name = "cascade",
88};
89
81DEFINE_PER_CPU(vector_irq_t, vector_irq) = { 90DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
82 [0 ... IRQ0_VECTOR - 1] = -1, 91 [0 ... IRQ0_VECTOR - 1] = -1,
83 [IRQ0_VECTOR] = 0, 92 [IRQ0_VECTOR] = 0,
@@ -140,8 +149,15 @@ void __init native_init_IRQ(void)
140 */ 149 */
141 alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); 150 alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
142 151
143 /* IPI for invalidation */ 152 /* IPIs for invalidation */
144 alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt); 153 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
154 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
155 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
156 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
157 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
158 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
159 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
160 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
145 161
146 /* IPI for generic function call */ 162 /* IPI for generic function call */
147 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); 163 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
@@ -169,6 +185,9 @@ void __init native_init_IRQ(void)
169 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); 185 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
170#endif 186#endif
171 187
188 if (!acpi_ioapic)
189 setup_irq(2, &irq2);
190
172 /* setup after call gates are initialised (usually add in 191 /* setup after call gates are initialised (usually add in
173 * the architecture specific gates) 192 * the architecture specific gates)
174 */ 193 */
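Eight invalidation gates replace the single one because each vector acts as an independent TLB-flush channel: a flushing CPU claims one slot, so up to eight flushes can be in progress concurrently instead of serializing on a single lock. A sketch of the sender side under that scheme (slot selection and the flush_state naming follow the companion tlb-flush code of this series and are assumptions here):

	/* Pick a flush slot; CPUs hash onto the eight vectors. */
	int sender = smp_processor_id() % 8;

	spin_lock(&flush_state[sender].tlbstate_lock);
	apic->send_IPI_mask(flush_mask,
			    INVALIDATE_TLB_VECTOR_START + sender);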
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 10435a120d22..5c4f55483849 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -46,7 +46,7 @@
46#include <asm/apicdef.h> 46#include <asm/apicdef.h>
47#include <asm/system.h> 47#include <asm/system.h>
48 48
49#include <mach_ipi.h> 49#include <asm/genapic.h>
50 50
51/* 51/*
52 * Put the error code here just in case the user cares: 52 * Put the error code here just in case the user cares:
@@ -347,7 +347,7 @@ void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
347 */ 347 */
348void kgdb_roundup_cpus(unsigned long flags) 348void kgdb_roundup_cpus(unsigned long flags)
349{ 349{
350 send_IPI_allbutself(APIC_DM_NMI); 350 apic->send_IPI_allbutself(APIC_DM_NMI);
351} 351}
352#endif 352#endif
353 353
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index c43caa3a91f3..6993d51b7fd8 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -18,15 +18,6 @@
18#include <asm/mmu_context.h> 18#include <asm/mmu_context.h>
19#include <asm/io.h> 19#include <asm/io.h>
20 20
21#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
22static u64 kexec_pgd[512] PAGE_ALIGNED;
23static u64 kexec_pud0[512] PAGE_ALIGNED;
24static u64 kexec_pmd0[512] PAGE_ALIGNED;
25static u64 kexec_pte0[512] PAGE_ALIGNED;
26static u64 kexec_pud1[512] PAGE_ALIGNED;
27static u64 kexec_pmd1[512] PAGE_ALIGNED;
28static u64 kexec_pte1[512] PAGE_ALIGNED;
29
30static void init_level2_page(pmd_t *level2p, unsigned long addr) 21static void init_level2_page(pmd_t *level2p, unsigned long addr)
31{ 22{
32 unsigned long end_addr; 23 unsigned long end_addr;
@@ -107,12 +98,65 @@ out:
107 return result; 98 return result;
108} 99}
109 100
101static void free_transition_pgtable(struct kimage *image)
102{
103 free_page((unsigned long)image->arch.pud);
104 free_page((unsigned long)image->arch.pmd);
105 free_page((unsigned long)image->arch.pte);
106}
107
108static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
109{
110 pud_t *pud;
111 pmd_t *pmd;
112 pte_t *pte;
113 unsigned long vaddr, paddr;
114 int result = -ENOMEM;
115
116 vaddr = (unsigned long)relocate_kernel;
117 paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
118 pgd += pgd_index(vaddr);
119 if (!pgd_present(*pgd)) {
120 pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
121 if (!pud)
122 goto err;
123 image->arch.pud = pud;
124 set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
125 }
126 pud = pud_offset(pgd, vaddr);
127 if (!pud_present(*pud)) {
128 pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
129 if (!pmd)
130 goto err;
131 image->arch.pmd = pmd;
132 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
133 }
134 pmd = pmd_offset(pud, vaddr);
135 if (!pmd_present(*pmd)) {
136 pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
137 if (!pte)
138 goto err;
139 image->arch.pte = pte;
140 set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
141 }
142 pte = pte_offset_kernel(pmd, vaddr);
143 set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
144 return 0;
145err:
146 free_transition_pgtable(image);
147 return result;
148}
149
110 150
111static int init_pgtable(struct kimage *image, unsigned long start_pgtable) 151static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
112{ 152{
113 pgd_t *level4p; 153 pgd_t *level4p;
154 int result;
114 level4p = (pgd_t *)__va(start_pgtable); 155 level4p = (pgd_t *)__va(start_pgtable);
115 return init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT); 156 result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
157 if (result)
158 return result;
159 return init_transition_pgtable(image, level4p);
116} 160}
117 161
118static void set_idt(void *newidt, u16 limit) 162static void set_idt(void *newidt, u16 limit)
@@ -174,7 +218,7 @@ int machine_kexec_prepare(struct kimage *image)
174 218
175void machine_kexec_cleanup(struct kimage *image) 219void machine_kexec_cleanup(struct kimage *image)
176{ 220{
177 return; 221 free_transition_pgtable(image);
178} 222}
179 223
180/* 224/*
@@ -195,22 +239,6 @@ void machine_kexec(struct kimage *image)
195 memcpy(control_page, relocate_kernel, PAGE_SIZE); 239 memcpy(control_page, relocate_kernel, PAGE_SIZE);
196 240
197 page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page); 241 page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
198 page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
199 page_list[PA_PGD] = virt_to_phys(&kexec_pgd);
200 page_list[VA_PGD] = (unsigned long)kexec_pgd;
201 page_list[PA_PUD_0] = virt_to_phys(&kexec_pud0);
202 page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
203 page_list[PA_PMD_0] = virt_to_phys(&kexec_pmd0);
204 page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
205 page_list[PA_PTE_0] = virt_to_phys(&kexec_pte0);
206 page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
207 page_list[PA_PUD_1] = virt_to_phys(&kexec_pud1);
208 page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
209 page_list[PA_PMD_1] = virt_to_phys(&kexec_pmd1);
210 page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
211 page_list[PA_PTE_1] = virt_to_phys(&kexec_pte1);
212 page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
213
214 page_list[PA_TABLE_PAGE] = 242 page_list[PA_TABLE_PAGE] =
215 (unsigned long)__pa(page_address(image->control_code_page)); 243 (unsigned long)__pa(page_address(image->control_code_page));
216 244
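The transition page table replaces the seven statically allocated kexec_* tables: each level is now allocated only when missing and released again through free_transition_pgtable(). What init_transition_pgtable() does, annotated step by step (standard 4-level x86-64 paging; the bit ranges are the architectural index fields):

	vaddr = (unsigned long)relocate_kernel;
	pgd += pgd_index(vaddr);		/* level 4: bits 47..39 */
	pud  = pud_offset(pgd, vaddr);		/* level 3: bits 38..30 */
	pmd  = pmd_offset(pud, vaddr);		/* level 2: bits 29..21 */
	pte  = pte_offset_kernel(pmd, vaddr);	/* level 1: bits 20..12 */

	/* Map relocate_kernel's VA to paddr (computed above), executable,
	 * so the trampoline keeps running across the page-table switch. */
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));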
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index b7f4c929e615..5e9f4fc51385 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -87,9 +87,9 @@
87#include <linux/cpu.h> 87#include <linux/cpu.h>
88#include <linux/firmware.h> 88#include <linux/firmware.h>
89#include <linux/platform_device.h> 89#include <linux/platform_device.h>
90#include <linux/uaccess.h>
90 91
91#include <asm/msr.h> 92#include <asm/msr.h>
92#include <asm/uaccess.h>
93#include <asm/processor.h> 93#include <asm/processor.h>
94#include <asm/microcode.h> 94#include <asm/microcode.h>
95 95
@@ -196,7 +196,7 @@ static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf)
196 return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1; 196 return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1;
197} 197}
198 198
199static inline int 199static inline int
200update_match_revision(struct microcode_header_intel *mc_header, int rev) 200update_match_revision(struct microcode_header_intel *mc_header, int rev)
201{ 201{
202 return (mc_header->rev <= rev) ? 0 : 1; 202 return (mc_header->rev <= rev) ? 0 : 1;
@@ -442,8 +442,8 @@ static int request_microcode_fw(int cpu, struct device *device)
442 return ret; 442 return ret;
443 } 443 }
444 444
445 ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size, 445 ret = generic_load_microcode(cpu, (void *)firmware->data,
446 &get_ucode_fw); 446 firmware->size, &get_ucode_fw);
447 447
448 release_firmware(firmware); 448 release_firmware(firmware);
449 449
@@ -460,7 +460,7 @@ static int request_microcode_user(int cpu, const void __user *buf, size_t size)
460 /* We should bind the task to the CPU */ 460 /* We should bind the task to the CPU */
461 BUG_ON(cpu != raw_smp_processor_id()); 461 BUG_ON(cpu != raw_smp_processor_id());
462 462
463 return generic_load_microcode(cpu, (void*)buf, size, &get_ucode_user); 463 return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
464} 464}
465 465
466static void microcode_fini_cpu(int cpu) 466static void microcode_fini_cpu(int cpu)
diff --git a/arch/x86/kernel/module_32.c b/arch/x86/kernel/module_32.c
index 3db0a5442eb1..0edd819050e7 100644
--- a/arch/x86/kernel/module_32.c
+++ b/arch/x86/kernel/module_32.c
@@ -42,7 +42,7 @@ void module_free(struct module *mod, void *module_region)
42{ 42{
43 vfree(module_region); 43 vfree(module_region);
44 /* FIXME: If module_region == mod->init_region, trim exception 44 /* FIXME: If module_region == mod->init_region, trim exception
45 table entries. */ 45 table entries. */
46} 46}
47 47
48/* We don't need anything special. */ 48/* We don't need anything special. */
@@ -113,13 +113,13 @@ int module_finalize(const Elf_Ehdr *hdr,
113 *para = NULL; 113 *para = NULL;
114 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; 114 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
115 115
116 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { 116 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
117 if (!strcmp(".text", secstrings + s->sh_name)) 117 if (!strcmp(".text", secstrings + s->sh_name))
118 text = s; 118 text = s;
119 if (!strcmp(".altinstructions", secstrings + s->sh_name)) 119 if (!strcmp(".altinstructions", secstrings + s->sh_name))
120 alt = s; 120 alt = s;
121 if (!strcmp(".smp_locks", secstrings + s->sh_name)) 121 if (!strcmp(".smp_locks", secstrings + s->sh_name))
122 locks= s; 122 locks = s;
123 if (!strcmp(".parainstructions", secstrings + s->sh_name)) 123 if (!strcmp(".parainstructions", secstrings + s->sh_name))
124 para = s; 124 para = s;
125 } 125 }
diff --git a/arch/x86/kernel/module_64.c b/arch/x86/kernel/module_64.c
index 6ba87830d4b1..c23880b90b5c 100644
--- a/arch/x86/kernel/module_64.c
+++ b/arch/x86/kernel/module_64.c
@@ -30,14 +30,14 @@
30#include <asm/page.h> 30#include <asm/page.h>
31#include <asm/pgtable.h> 31#include <asm/pgtable.h>
32 32
33#define DEBUGP(fmt...) 33#define DEBUGP(fmt...)
34 34
35#ifndef CONFIG_UML 35#ifndef CONFIG_UML
36void module_free(struct module *mod, void *module_region) 36void module_free(struct module *mod, void *module_region)
37{ 37{
38 vfree(module_region); 38 vfree(module_region);
39 /* FIXME: If module_region == mod->init_region, trim exception 39 /* FIXME: If module_region == mod->init_region, trim exception
40 table entries. */ 40 table entries. */
41} 41}
42 42
43void *module_alloc(unsigned long size) 43void *module_alloc(unsigned long size)
@@ -77,7 +77,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
77 Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr; 77 Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
78 Elf64_Sym *sym; 78 Elf64_Sym *sym;
79 void *loc; 79 void *loc;
80 u64 val; 80 u64 val;
81 81
82 DEBUGP("Applying relocate section %u to %u\n", relsec, 82 DEBUGP("Applying relocate section %u to %u\n", relsec,
83 sechdrs[relsec].sh_info); 83 sechdrs[relsec].sh_info);
@@ -91,11 +91,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
91 sym = (Elf64_Sym *)sechdrs[symindex].sh_addr 91 sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
92 + ELF64_R_SYM(rel[i].r_info); 92 + ELF64_R_SYM(rel[i].r_info);
93 93
94 DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n", 94 DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
95 (int)ELF64_R_TYPE(rel[i].r_info), 95 (int)ELF64_R_TYPE(rel[i].r_info),
96 sym->st_value, rel[i].r_addend, (u64)loc); 96 sym->st_value, rel[i].r_addend, (u64)loc);
97 97
98 val = sym->st_value + rel[i].r_addend; 98 val = sym->st_value + rel[i].r_addend;
99 99
100 switch (ELF64_R_TYPE(rel[i].r_info)) { 100 switch (ELF64_R_TYPE(rel[i].r_info)) {
101 case R_X86_64_NONE: 101 case R_X86_64_NONE:
@@ -113,16 +113,16 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
113 if ((s64)val != *(s32 *)loc) 113 if ((s64)val != *(s32 *)loc)
114 goto overflow; 114 goto overflow;
115 break; 115 break;
116 case R_X86_64_PC32: 116 case R_X86_64_PC32:
117 val -= (u64)loc; 117 val -= (u64)loc;
118 *(u32 *)loc = val; 118 *(u32 *)loc = val;
119#if 0 119#if 0
120 if ((s64)val != *(s32 *)loc) 120 if ((s64)val != *(s32 *)loc)
121 goto overflow; 121 goto overflow;
122#endif 122#endif
123 break; 123 break;
124 default: 124 default:
125 printk(KERN_ERR "module %s: Unknown rela relocation: %Lu\n", 125 printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n",
126 me->name, ELF64_R_TYPE(rel[i].r_info)); 126 me->name, ELF64_R_TYPE(rel[i].r_info));
127 return -ENOEXEC; 127 return -ENOEXEC;
128 } 128 }
@@ -130,7 +130,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
130 return 0; 130 return 0;
131 131
132overflow: 132overflow:
133 printk(KERN_ERR "overflow in relocation type %d val %Lx\n", 133 printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
134 (int)ELF64_R_TYPE(rel[i].r_info), val); 134 (int)ELF64_R_TYPE(rel[i].r_info), val);
135 printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n", 135 printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n",
136 me->name); 136 me->name);
@@ -143,13 +143,13 @@ int apply_relocate(Elf_Shdr *sechdrs,
143 unsigned int relsec, 143 unsigned int relsec,
144 struct module *me) 144 struct module *me)
145{ 145{
146 printk("non add relocation not supported\n"); 146 printk(KERN_ERR "non add relocation not supported\n");
147 return -ENOSYS; 147 return -ENOSYS;
148} 148}
149 149
150int module_finalize(const Elf_Ehdr *hdr, 150int module_finalize(const Elf_Ehdr *hdr,
151 const Elf_Shdr *sechdrs, 151 const Elf_Shdr *sechdrs,
152 struct module *me) 152 struct module *me)
153{ 153{
154 const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL, 154 const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
155 *para = NULL; 155 *para = NULL;
@@ -161,7 +161,7 @@ int module_finalize(const Elf_Ehdr *hdr,
161 if (!strcmp(".altinstructions", secstrings + s->sh_name)) 161 if (!strcmp(".altinstructions", secstrings + s->sh_name))
162 alt = s; 162 alt = s;
163 if (!strcmp(".smp_locks", secstrings + s->sh_name)) 163 if (!strcmp(".smp_locks", secstrings + s->sh_name))
164 locks= s; 164 locks = s;
165 if (!strcmp(".parainstructions", secstrings + s->sh_name)) 165 if (!strcmp(".parainstructions", secstrings + s->sh_name))
166 para = s; 166 para = s;
167 } 167 }
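For reference, the arithmetic the relocation loop above applies, in standard ELF notation with S = sym->st_value, A = rel[i].r_addend, P = (u64)loc (this restates the visible code, no new behavior):

	val = S + A;				/* common to all types */

	/* R_X86_64_64:   *(u64 *)loc = val                          */
	/* R_X86_64_32:   *(u32 *)loc = val, error if val > 2^32 - 1 */
	/* R_X86_64_32S:  *(s32 *)loc = val, error unless it          */
	/*                sign-extends back to the same 64-bit val    */
	/* R_X86_64_PC32: *(u32 *)loc = val - P  (PC-relative)        */

The overflow branches are what produce the "likely not compiled with -mcmodel=kernel" diagnostic: the kernel code model is what guarantees symbols fit these 32-bit forms.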
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index a649a4ccad43..200764453195 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -3,7 +3,7 @@
3 * compliant MP-table parsing routines. 3 * compliant MP-table parsing routines.
4 * 4 *
5 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk> 5 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
6 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> 6 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
7 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de> 7 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
8 */ 8 */
9 9
@@ -29,12 +29,7 @@
29#include <asm/setup.h> 29#include <asm/setup.h>
30#include <asm/smp.h> 30#include <asm/smp.h>
31 31
32#include <mach_apic.h> 32#include <asm/genapic.h>
33#ifdef CONFIG_X86_32
34#include <mach_apicdef.h>
35#include <mach_mpparse.h>
36#endif
37
38/* 33/*
39 * Checksum an MP configuration block. 34 * Checksum an MP configuration block.
40 */ 35 */
@@ -144,11 +139,11 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m)
144 if (bad_ioapic(m->apicaddr)) 139 if (bad_ioapic(m->apicaddr))
145 return; 140 return;
146 141
147 mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr; 142 mp_ioapics[nr_ioapics].apicaddr = m->apicaddr;
148 mp_ioapics[nr_ioapics].mp_apicid = m->apicid; 143 mp_ioapics[nr_ioapics].apicid = m->apicid;
149 mp_ioapics[nr_ioapics].mp_type = m->type; 144 mp_ioapics[nr_ioapics].type = m->type;
150 mp_ioapics[nr_ioapics].mp_apicver = m->apicver; 145 mp_ioapics[nr_ioapics].apicver = m->apicver;
151 mp_ioapics[nr_ioapics].mp_flags = m->flags; 146 mp_ioapics[nr_ioapics].flags = m->flags;
152 nr_ioapics++; 147 nr_ioapics++;
153} 148}
154 149
@@ -160,55 +155,55 @@ static void print_MP_intsrc_info(struct mpc_intsrc *m)
160 m->srcbusirq, m->dstapic, m->dstirq); 155 m->srcbusirq, m->dstapic, m->dstirq);
161} 156}
162 157
163static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq) 158static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
164{ 159{
165 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," 160 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
166 " IRQ %02x, APIC ID %x, APIC INT %02x\n", 161 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
167 mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3, 162 mp_irq->irqtype, mp_irq->irqflag & 3,
168 (mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus, 163 (mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
169 mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq); 164 mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
170} 165}
171 166
172static void __init assign_to_mp_irq(struct mpc_intsrc *m, 167static void __init assign_to_mp_irq(struct mpc_intsrc *m,
173 struct mp_config_intsrc *mp_irq) 168 struct mpc_intsrc *mp_irq)
174{ 169{
175 mp_irq->mp_dstapic = m->dstapic; 170 mp_irq->dstapic = m->dstapic;
176 mp_irq->mp_type = m->type; 171 mp_irq->type = m->type;
177 mp_irq->mp_irqtype = m->irqtype; 172 mp_irq->irqtype = m->irqtype;
178 mp_irq->mp_irqflag = m->irqflag; 173 mp_irq->irqflag = m->irqflag;
179 mp_irq->mp_srcbus = m->srcbus; 174 mp_irq->srcbus = m->srcbus;
180 mp_irq->mp_srcbusirq = m->srcbusirq; 175 mp_irq->srcbusirq = m->srcbusirq;
181 mp_irq->mp_dstirq = m->dstirq; 176 mp_irq->dstirq = m->dstirq;
182} 177}
183 178
184static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq, 179static void __init assign_to_mpc_intsrc(struct mpc_intsrc *mp_irq,
185 struct mpc_intsrc *m) 180 struct mpc_intsrc *m)
186{ 181{
187 m->dstapic = mp_irq->mp_dstapic; 182 m->dstapic = mp_irq->dstapic;
188 m->type = mp_irq->mp_type; 183 m->type = mp_irq->type;
189 m->irqtype = mp_irq->mp_irqtype; 184 m->irqtype = mp_irq->irqtype;
190 m->irqflag = mp_irq->mp_irqflag; 185 m->irqflag = mp_irq->irqflag;
191 m->srcbus = mp_irq->mp_srcbus; 186 m->srcbus = mp_irq->srcbus;
192 m->srcbusirq = mp_irq->mp_srcbusirq; 187 m->srcbusirq = mp_irq->srcbusirq;
193 m->dstirq = mp_irq->mp_dstirq; 188 m->dstirq = mp_irq->dstirq;
194} 189}
195 190
196static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq, 191static int __init mp_irq_mpc_intsrc_cmp(struct mpc_intsrc *mp_irq,
197 struct mpc_intsrc *m) 192 struct mpc_intsrc *m)
198{ 193{
199 if (mp_irq->mp_dstapic != m->dstapic) 194 if (mp_irq->dstapic != m->dstapic)
200 return 1; 195 return 1;
201 if (mp_irq->mp_type != m->type) 196 if (mp_irq->type != m->type)
202 return 2; 197 return 2;
203 if (mp_irq->mp_irqtype != m->irqtype) 198 if (mp_irq->irqtype != m->irqtype)
204 return 3; 199 return 3;
205 if (mp_irq->mp_irqflag != m->irqflag) 200 if (mp_irq->irqflag != m->irqflag)
206 return 4; 201 return 4;
207 if (mp_irq->mp_srcbus != m->srcbus) 202 if (mp_irq->srcbus != m->srcbus)
208 return 5; 203 return 5;
209 if (mp_irq->mp_srcbusirq != m->srcbusirq) 204 if (mp_irq->srcbusirq != m->srcbusirq)
210 return 6; 205 return 6;
211 if (mp_irq->mp_dstirq != m->dstirq) 206 if (mp_irq->dstirq != m->dstirq)
212 return 7; 207 return 7;
213 208
214 return 0; 209 return 0;
@@ -292,16 +287,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
292 return 0; 287 return 0;
293 288
294#ifdef CONFIG_X86_32 289#ifdef CONFIG_X86_32
295 /* 290 generic_mps_oem_check(mpc, oem, str);
296 * need to make sure summit and es7000's mps_oem_check is safe to be
297 * called early via genericarch 's mps_oem_check
298 */
299 if (early) {
300#ifdef CONFIG_X86_NUMAQ
301 numaq_mps_oem_check(mpc, oem, str);
302#endif
303 } else
304 mps_oem_check(mpc, oem, str);
305#endif 291#endif
306 /* save the local APIC address, it might be non-default */ 292 /* save the local APIC address, it might be non-default */
307 if (!acpi_lapic) 293 if (!acpi_lapic)
@@ -386,13 +372,13 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
386 (*x86_quirks->mpc_record)++; 372 (*x86_quirks->mpc_record)++;
387 } 373 }
388 374
389#ifdef CONFIG_X86_GENERICARCH 375#ifdef CONFIG_X86_BIGSMP
390 generic_bigsmp_probe(); 376 generic_bigsmp_probe();
391#endif 377#endif
392 378
393#ifdef CONFIG_X86_32 379 if (apic->setup_apic_routing)
394 setup_apic_routing(); 380 apic->setup_apic_routing();
395#endif 381
396 if (!num_processors) 382 if (!num_processors)
397 printk(KERN_ERR "MPTABLE: no processors registered!\n"); 383 printk(KERN_ERR "MPTABLE: no processors registered!\n");
398 return num_processors; 384 return num_processors;
@@ -417,7 +403,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
417 intsrc.type = MP_INTSRC; 403 intsrc.type = MP_INTSRC;
418 intsrc.irqflag = 0; /* conforming */ 404 intsrc.irqflag = 0; /* conforming */
419 intsrc.srcbus = 0; 405 intsrc.srcbus = 0;
420 intsrc.dstapic = mp_ioapics[0].mp_apicid; 406 intsrc.dstapic = mp_ioapics[0].apicid;
421 407
422 intsrc.irqtype = mp_INT; 408 intsrc.irqtype = mp_INT;
423 409
@@ -570,14 +556,14 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
570 } 556 }
571} 557}
572 558
573static struct intel_mp_floating *mpf_found; 559static struct mpf_intel *mpf_found;
574 560
575/* 561/*
576 * Scan the memory blocks for an SMP configuration block. 562 * Scan the memory blocks for an SMP configuration block.
577 */ 563 */
578static void __init __get_smp_config(unsigned int early) 564static void __init __get_smp_config(unsigned int early)
579{ 565{
580 struct intel_mp_floating *mpf = mpf_found; 566 struct mpf_intel *mpf = mpf_found;
581 567
582 if (!mpf) 568 if (!mpf)
583 return; 569 return;
@@ -598,9 +584,9 @@ static void __init __get_smp_config(unsigned int early)
598 } 584 }
599 585
600 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", 586 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
601 mpf->mpf_specification); 587 mpf->specification);
602#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) 588#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
603 if (mpf->mpf_feature2 & (1 << 7)) { 589 if (mpf->feature2 & (1 << 7)) {
604 printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); 590 printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
605 pic_mode = 1; 591 pic_mode = 1;
606 } else { 592 } else {
@@ -611,7 +597,7 @@ static void __init __get_smp_config(unsigned int early)
611 /* 597 /*
612 * Now see if we need to read further. 598 * Now see if we need to read further.
613 */ 599 */
614 if (mpf->mpf_feature1 != 0) { 600 if (mpf->feature1 != 0) {
615 if (early) { 601 if (early) {
616 /* 602 /*
617 * local APIC has default address 603 * local APIC has default address
@@ -621,16 +607,16 @@ static void __init __get_smp_config(unsigned int early)
621 } 607 }
622 608
623 printk(KERN_INFO "Default MP configuration #%d\n", 609 printk(KERN_INFO "Default MP configuration #%d\n",
624 mpf->mpf_feature1); 610 mpf->feature1);
625 construct_default_ISA_mptable(mpf->mpf_feature1); 611 construct_default_ISA_mptable(mpf->feature1);
626 612
627 } else if (mpf->mpf_physptr) { 613 } else if (mpf->physptr) {
628 614
629 /* 615 /*
630 * Read the physical hardware table. Anything here will 616 * Read the physical hardware table. Anything here will
631 * override the defaults. 617 * override the defaults.
632 */ 618 */
633 if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) { 619 if (!smp_read_mpc(phys_to_virt(mpf->physptr), early)) {
634#ifdef CONFIG_X86_LOCAL_APIC 620#ifdef CONFIG_X86_LOCAL_APIC
635 smp_found_config = 0; 621 smp_found_config = 0;
636#endif 622#endif
@@ -688,32 +674,32 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
688 unsigned reserve) 674 unsigned reserve)
689{ 675{
690 unsigned int *bp = phys_to_virt(base); 676 unsigned int *bp = phys_to_virt(base);
691 struct intel_mp_floating *mpf; 677 struct mpf_intel *mpf;
692 678
693 apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n", 679 apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
694 bp, length); 680 bp, length);
695 BUILD_BUG_ON(sizeof(*mpf) != 16); 681 BUILD_BUG_ON(sizeof(*mpf) != 16);
696 682
697 while (length > 0) { 683 while (length > 0) {
698 mpf = (struct intel_mp_floating *)bp; 684 mpf = (struct mpf_intel *)bp;
699 if ((*bp == SMP_MAGIC_IDENT) && 685 if ((*bp == SMP_MAGIC_IDENT) &&
700 (mpf->mpf_length == 1) && 686 (mpf->length == 1) &&
701 !mpf_checksum((unsigned char *)bp, 16) && 687 !mpf_checksum((unsigned char *)bp, 16) &&
702 ((mpf->mpf_specification == 1) 688 ((mpf->specification == 1)
703 || (mpf->mpf_specification == 4))) { 689 || (mpf->specification == 4))) {
704#ifdef CONFIG_X86_LOCAL_APIC 690#ifdef CONFIG_X86_LOCAL_APIC
705 smp_found_config = 1; 691 smp_found_config = 1;
706#endif 692#endif
707 mpf_found = mpf; 693 mpf_found = mpf;
708 694
709 printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n", 695 printk(KERN_INFO "found SMP MP-table at [%p] %llx\n",
710 mpf, virt_to_phys(mpf)); 696 mpf, (u64)virt_to_phys(mpf));
711 697
712 if (!reserve) 698 if (!reserve)
713 return 1; 699 return 1;
714 reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE, 700 reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE,
715 BOOTMEM_DEFAULT); 701 BOOTMEM_DEFAULT);
716 if (mpf->mpf_physptr) { 702 if (mpf->physptr) {
717 unsigned long size = PAGE_SIZE; 703 unsigned long size = PAGE_SIZE;
718#ifdef CONFIG_X86_32 704#ifdef CONFIG_X86_32
719 /* 705 /*
@@ -722,14 +708,14 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
722 * the bottom is mapped now. 708 * the bottom is mapped now.
723 * PC-9800's MPC table places on the very last 709 * PC-9800's MPC table places on the very last
724 * of physical memory; so that simply reserving 710 * of physical memory; so that simply reserving
725 * PAGE_SIZE from mpg->mpf_physptr yields BUG() 711 * PAGE_SIZE from mpf->physptr yields BUG()
726 * in reserve_bootmem. 712 * in reserve_bootmem.
727 */ 713 */
728 unsigned long end = max_low_pfn * PAGE_SIZE; 714 unsigned long end = max_low_pfn * PAGE_SIZE;
729 if (mpf->mpf_physptr + size > end) 715 if (mpf->physptr + size > end)
730 size = end - mpf->mpf_physptr; 716 size = end - mpf->physptr;
731#endif 717#endif
732 reserve_bootmem_generic(mpf->mpf_physptr, size, 718 reserve_bootmem_generic(mpf->physptr, size,
733 BOOTMEM_DEFAULT); 719 BOOTMEM_DEFAULT);
734 } 720 }
735 721
@@ -809,15 +795,15 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
809 /* not legacy */ 795 /* not legacy */
810 796
811 for (i = 0; i < mp_irq_entries; i++) { 797 for (i = 0; i < mp_irq_entries; i++) {
812 if (mp_irqs[i].mp_irqtype != mp_INT) 798 if (mp_irqs[i].irqtype != mp_INT)
813 continue; 799 continue;
814 800
815 if (mp_irqs[i].mp_irqflag != 0x0f) 801 if (mp_irqs[i].irqflag != 0x0f)
816 continue; 802 continue;
817 803
818 if (mp_irqs[i].mp_srcbus != m->srcbus) 804 if (mp_irqs[i].srcbus != m->srcbus)
819 continue; 805 continue;
820 if (mp_irqs[i].mp_srcbusirq != m->srcbusirq) 806 if (mp_irqs[i].srcbusirq != m->srcbusirq)
821 continue; 807 continue;
822 if (irq_used[i]) { 808 if (irq_used[i]) {
823 /* already claimed */ 809 /* already claimed */
@@ -922,10 +908,10 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
922 if (irq_used[i]) 908 if (irq_used[i])
923 continue; 909 continue;
924 910
925 if (mp_irqs[i].mp_irqtype != mp_INT) 911 if (mp_irqs[i].irqtype != mp_INT)
926 continue; 912 continue;
927 913
928 if (mp_irqs[i].mp_irqflag != 0x0f) 914 if (mp_irqs[i].irqflag != 0x0f)
929 continue; 915 continue;
930 916
931 if (nr_m_spare > 0) { 917 if (nr_m_spare > 0) {
@@ -1001,7 +987,7 @@ static int __init update_mp_table(void)
1001{ 987{
1002 char str[16]; 988 char str[16];
1003 char oem[10]; 989 char oem[10];
1004 struct intel_mp_floating *mpf; 990 struct mpf_intel *mpf;
1005 struct mpc_table *mpc, *mpc_new; 991 struct mpc_table *mpc, *mpc_new;
1006 992
1007 if (!enable_update_mptable) 993 if (!enable_update_mptable)
@@ -1014,19 +1000,19 @@ static int __init update_mp_table(void)
1014 /* 1000 /*
1015 * Now see if we need to go further. 1001 * Now see if we need to go further.
1016 */ 1002 */
1017 if (mpf->mpf_feature1 != 0) 1003 if (mpf->feature1 != 0)
1018 return 0; 1004 return 0;
1019 1005
1020 if (!mpf->mpf_physptr) 1006 if (!mpf->physptr)
1021 return 0; 1007 return 0;
1022 1008
1023 mpc = phys_to_virt(mpf->mpf_physptr); 1009 mpc = phys_to_virt(mpf->physptr);
1024 1010
1025 if (!smp_check_mpc(mpc, oem, str)) 1011 if (!smp_check_mpc(mpc, oem, str))
1026 return 0; 1012 return 0;
1027 1013
1028 printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf)); 1014 printk(KERN_INFO "mpf: %llx\n", (u64)virt_to_phys(mpf));
1029 printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr); 1015 printk(KERN_INFO "physptr: %x\n", mpf->physptr);
1030 1016
1031 if (mpc_new_phys && mpc->length > mpc_new_length) { 1017 if (mpc_new_phys && mpc->length > mpc_new_length) {
1032 mpc_new_phys = 0; 1018 mpc_new_phys = 0;
@@ -1047,23 +1033,23 @@ static int __init update_mp_table(void)
1047 } 1033 }
1048 printk(KERN_INFO "use in-position replacing\n"); 1034 printk(KERN_INFO "use in-position replacing\n");
1049 } else { 1035 } else {
1050 mpf->mpf_physptr = mpc_new_phys; 1036 mpf->physptr = mpc_new_phys;
1051 mpc_new = phys_to_virt(mpc_new_phys); 1037 mpc_new = phys_to_virt(mpc_new_phys);
1052 memcpy(mpc_new, mpc, mpc->length); 1038 memcpy(mpc_new, mpc, mpc->length);
1053 mpc = mpc_new; 1039 mpc = mpc_new;
1054 /* check if we can modify that */ 1040 /* check if we can modify that */
1055 if (mpc_new_phys - mpf->mpf_physptr) { 1041 if (mpc_new_phys - mpf->physptr) {
1056 struct intel_mp_floating *mpf_new; 1042 struct mpf_intel *mpf_new;
1057 /* steal 16 bytes from [0, 1k) */ 1043 /* steal 16 bytes from [0, 1k) */
1058 printk(KERN_INFO "mpf new: %x\n", 0x400 - 16); 1044 printk(KERN_INFO "mpf new: %x\n", 0x400 - 16);
1059 mpf_new = phys_to_virt(0x400 - 16); 1045 mpf_new = phys_to_virt(0x400 - 16);
1060 memcpy(mpf_new, mpf, 16); 1046 memcpy(mpf_new, mpf, 16);
1061 mpf = mpf_new; 1047 mpf = mpf_new;
1062 mpf->mpf_physptr = mpc_new_phys; 1048 mpf->physptr = mpc_new_phys;
1063 } 1049 }
1064 mpf->mpf_checksum = 0; 1050 mpf->checksum = 0;
1065 mpf->mpf_checksum -= mpf_checksum((unsigned char *)mpf, 16); 1051 mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
1066 printk(KERN_INFO "mpf_physptr new: %x\n", mpf->mpf_physptr); 1052 printk(KERN_INFO "physptr new: %x\n", mpf->physptr);
1067 } 1053 }
1068 1054
1069 /* 1055 /*
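smp_scan_config() above accepts a candidate floating structure only if the signature, length, specification version and checksum all agree. The checksum rule is that the 16 bytes of the structure sum to zero mod 256; a sketch matching the mpf_checksum() the code calls (the body is not in the hunks, so this mirrors the expected definition):

	static int mpf_checksum(unsigned char *mp, int len)
	{
		int sum = 0;

		while (len--)
			sum += *mp++;

		return sum & 0xFF;	/* 0 == valid */
	}

This is also why update_mp_table() can recompute a valid checksum with mpf->checksum = 0 followed by mpf->checksum -= mpf_checksum(...): subtracting the partial sum forces the total back to zero.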
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 726266695b2c..3cf3413ec626 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -35,10 +35,10 @@
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/cpu.h> 36#include <linux/cpu.h>
37#include <linux/notifier.h> 37#include <linux/notifier.h>
38#include <linux/uaccess.h>
38 39
39#include <asm/processor.h> 40#include <asm/processor.h>
40#include <asm/msr.h> 41#include <asm/msr.h>
41#include <asm/uaccess.h>
42#include <asm/system.h> 42#include <asm/system.h>
43 43
44static struct class *msr_class; 44static struct class *msr_class;
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 7228979f1e7f..bdfad80c3cf1 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -34,7 +34,7 @@
34 34
35#include <asm/mce.h> 35#include <asm/mce.h>
36 36
37#include <mach_traps.h> 37#include <asm/mach_traps.h>
38 38
39int unknown_nmi_panic; 39int unknown_nmi_panic;
40int nmi_watchdog_enabled; 40int nmi_watchdog_enabled;
@@ -61,11 +61,7 @@ static int endflag __initdata;
61 61
62static inline unsigned int get_nmi_count(int cpu) 62static inline unsigned int get_nmi_count(int cpu)
63{ 63{
64#ifdef CONFIG_X86_64 64 return per_cpu(irq_stat, cpu).__nmi_count;
65 return cpu_pda(cpu)->__nmi_count;
66#else
67 return nmi_count(cpu);
68#endif
69} 65}
70 66
71static inline int mce_in_progress(void) 67static inline int mce_in_progress(void)
@@ -82,12 +78,8 @@ static inline int mce_in_progress(void)
82 */ 78 */
83static inline unsigned int get_timer_irqs(int cpu) 79static inline unsigned int get_timer_irqs(int cpu)
84{ 80{
85#ifdef CONFIG_X86_64
86 return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
87#else
88 return per_cpu(irq_stat, cpu).apic_timer_irqs + 81 return per_cpu(irq_stat, cpu).apic_timer_irqs +
89 per_cpu(irq_stat, cpu).irq0_irqs; 82 per_cpu(irq_stat, cpu).irq0_irqs;
90#endif
91} 83}
92 84
93#ifdef CONFIG_SMP 85#ifdef CONFIG_SMP
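With the 64-bit PDA counters gone, the NMI watchdog reads the same per-cpu irq_cpustat_t on both widths; the two accessors above collapse to plain per_cpu() reads (this restates the new code, no assumptions):

	unsigned int nmis   = per_cpu(irq_stat, cpu).__nmi_count;
	unsigned int timers = per_cpu(irq_stat, cpu).apic_timer_irqs +
			      per_cpu(irq_stat, cpu).irq0_irqs;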
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c
index f2191d4f2717..0cc41a1d2550 100644
--- a/arch/x86/kernel/numaq_32.c
+++ b/arch/x86/kernel/numaq_32.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2002, IBM Corp. 4 * Copyright (C) 2002, IBM Corp.
5 * 5 *
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -23,17 +23,18 @@
23 * Send feedback to <gone@us.ibm.com> 23 * Send feedback to <gone@us.ibm.com>
24 */ 24 */
25 25
26#include <linux/mm.h> 26#include <linux/nodemask.h>
27#include <linux/bootmem.h> 27#include <linux/bootmem.h>
28#include <linux/mmzone.h> 28#include <linux/mmzone.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/nodemask.h> 30#include <linux/mm.h>
31#include <asm/numaq.h> 31
32#include <asm/topology.h>
33#include <asm/processor.h> 32#include <asm/processor.h>
33#include <asm/topology.h>
34#include <asm/genapic.h> 34#include <asm/genapic.h>
35#include <asm/e820.h> 35#include <asm/numaq.h>
36#include <asm/setup.h> 36#include <asm/setup.h>
37#include <asm/e820.h>
37 38
38#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT)) 39#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT))
39 40
@@ -91,19 +92,20 @@ static int __init numaq_pre_time_init(void)
91} 92}
92 93
93int found_numaq; 94int found_numaq;
95
94/* 96/*
95 * Have to match translation table entries to main table entries by counter 97 * Have to match translation table entries to main table entries by counter
96 * hence the mpc_record variable .... can't see a less disgusting way of 98 * hence the mpc_record variable .... can't see a less disgusting way of
97 * doing this .... 99 * doing this ....
98 */ 100 */
99struct mpc_config_translation { 101struct mpc_config_translation {
100 unsigned char mpc_type; 102 unsigned char mpc_type;
101 unsigned char trans_len; 103 unsigned char trans_len;
102 unsigned char trans_type; 104 unsigned char trans_type;
103 unsigned char trans_quad; 105 unsigned char trans_quad;
104 unsigned char trans_global; 106 unsigned char trans_global;
105 unsigned char trans_local; 107 unsigned char trans_local;
106 unsigned short trans_reserved; 108 unsigned short trans_reserved;
107}; 109};
108 110
109/* x86_quirks member */ 111/* x86_quirks member */
@@ -236,7 +238,7 @@ static int __init numaq_setup_ioapic_ids(void)
236 238
237static int __init numaq_update_genapic(void) 239static int __init numaq_update_genapic(void)
238{ 240{
239 genapic->wakeup_cpu = wakeup_secondary_cpu_via_nmi; 241 apic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
240 242
241 return 0; 243 return 0;
242} 244}
@@ -291,3 +293,280 @@ int __init get_memcfg_numaq(void)
291 smp_dump_qct(); 293 smp_dump_qct();
292 return 1; 294 return 1;
293} 295}
296
297/*
298 * APIC driver for the IBM NUMAQ chipset.
299 */
300#define APIC_DEFINITION 1
301#include <linux/threads.h>
302#include <linux/cpumask.h>
303#include <asm/mpspec.h>
304#include <asm/genapic.h>
305#include <asm/fixmap.h>
306#include <asm/apicdef.h>
307#include <asm/ipi.h>
308#include <linux/kernel.h>
309#include <linux/string.h>
310#include <linux/init.h>
311#include <linux/numa.h>
312#include <linux/smp.h>
313#include <asm/numaq.h>
314#include <asm/io.h>
315#include <linux/mmzone.h>
316#include <linux/nodemask.h>
317
318#define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
319
320static inline unsigned int numaq_get_apic_id(unsigned long x)
321{
322 return (x >> 24) & 0x0F;
323}
324
325static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector)
326{
327 default_send_IPI_mask_sequence_logical(mask, vector);
328}
329
330static inline void numaq_send_IPI_allbutself(int vector)
331{
332 default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector);
333}
334
335static inline void numaq_send_IPI_all(int vector)
336{
337 numaq_send_IPI_mask(cpu_online_mask, vector);
338}
339
340extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
341
342#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8)
343#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa)
344
345/*
346 * Because we use NMIs rather than the INIT-STARTUP sequence to
347 * bootstrap the CPUs, the APIC may be in a weird state. Kick it:
348 */
349static inline void numaq_smp_callin_clear_local_apic(void)
350{
351 clear_local_APIC();
352}
353
354static inline void
355numaq_store_NMI_vector(unsigned short *high, unsigned short *low)
356{
357 printk("Storing NMI vector\n");
358 *high =
359 *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_HIGH));
360 *low =
361 *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW));
362}
363
364static inline const cpumask_t *numaq_target_cpus(void)
365{
366 return &CPU_MASK_ALL;
367}
368
369static inline unsigned long
370numaq_check_apicid_used(physid_mask_t bitmap, int apicid)
371{
372 return physid_isset(apicid, bitmap);
373}
374
375static inline unsigned long numaq_check_apicid_present(int bit)
376{
377 return physid_isset(bit, phys_cpu_present_map);
378}
379
380#define apicid_cluster(apicid) (apicid & 0xF0)
381
382static inline int numaq_apic_id_registered(void)
383{
384 return 1;
385}
386
387static inline void numaq_init_apic_ldr(void)
388{
389 /* Already done in NUMA-Q firmware */
390}
391
392static inline void numaq_setup_apic_routing(void)
393{
394 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
395 "NUMA-Q", nr_ioapics);
396}
397
398/*
399 * Skip adding the timer int on secondary nodes, which causes
400 * a small but painful rift in the time-space continuum.
401 */
402static inline int numaq_multi_timer_check(int apic, int irq)
403{
404 return apic != 0 && irq == 0;
405}
406
407static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map)
408{
409 /* We don't have a good way to do this yet - hack */
410 return physids_promote(0xFUL);
411}
412
413/* Mapping from cpu number to logical apicid */
414extern u8 cpu_2_logical_apicid[];
415
416static inline int numaq_cpu_to_logical_apicid(int cpu)
417{
418 if (cpu >= nr_cpu_ids)
419 return BAD_APICID;
420 return (int)cpu_2_logical_apicid[cpu];
421}
422
423/*
424 * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
425 * cpu to APIC ID relation to properly interact with the intelligent
426 * mode of the cluster controller.
427 */
428static inline int numaq_cpu_present_to_apicid(int mps_cpu)
429{
430 if (mps_cpu < 60)
431 return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
432 else
433 return BAD_APICID;
434}
435
436static inline int numaq_apicid_to_node(int logical_apicid)
437{
438 return logical_apicid >> 4;
439}
440
441static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid)
442{
443 int node = numaq_apicid_to_node(logical_apicid);
444 int cpu = __ffs(logical_apicid & 0xf);
445
446 return physid_mask_of_physid(cpu + 4*node);
447}
448
449/* Where the IO area was mapped on multiquad, always 0 otherwise */
450void *xquad_portio;
451
452static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid)
453{
454 return 1;
455}
456
457/*
458 * We use physical apicids here, not logical, so just return the default
459 * physical broadcast to stop people from breaking us
460 */
461static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask)
462{
463 return 0x0F;
464}
465
466static inline unsigned int
467numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
468 const struct cpumask *andmask)
469{
470 return 0x0F;
471}
472
473/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
474static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb)
475{
476 return cpuid_apic >> index_msb;
477}
478static int __numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
479{
480 numaq_mps_oem_check(mpc, oem, productid);
481 return found_numaq;
482}
483
484static int probe_numaq(void)
485{
486 /* already know from get_memcfg_numaq() */
487 return found_numaq;
488}
489
490static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
491{
492 /* Careful. Some cpus do not strictly honor the set of cpus
493 * specified in the interrupt destination when using lowest
494 * priority interrupt delivery mode.
495 *
496 * In particular there was a hyperthreading cpu observed to
497 * deliver interrupts to the wrong hyperthread when only one
498 * hyperthread was specified in the interrupt destination.
499 */
500 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
501}
502
503static void numaq_setup_portio_remap(void)
504{
505 int num_quads = num_online_nodes();
506
507 if (num_quads <= 1)
508 return;
509
510 printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
511 xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
512 printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
513 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
514}
515
516struct genapic apic_numaq = {
517
518 .name = "NUMAQ",
519 .probe = probe_numaq,
520 .acpi_madt_oem_check = NULL,
521 .apic_id_registered = numaq_apic_id_registered,
522
523 .irq_delivery_mode = dest_LowestPrio,
524 /* physical delivery on LOCAL quad: */
525 .irq_dest_mode = 0,
526
527 .target_cpus = numaq_target_cpus,
528 .disable_esr = 1,
529 .dest_logical = APIC_DEST_LOGICAL,
530 .check_apicid_used = numaq_check_apicid_used,
531 .check_apicid_present = numaq_check_apicid_present,
532
533 .vector_allocation_domain = numaq_vector_allocation_domain,
534 .init_apic_ldr = numaq_init_apic_ldr,
535
536 .ioapic_phys_id_map = numaq_ioapic_phys_id_map,
537 .setup_apic_routing = numaq_setup_apic_routing,
538 .multi_timer_check = numaq_multi_timer_check,
539 .apicid_to_node = numaq_apicid_to_node,
540 .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid,
541 .cpu_present_to_apicid = numaq_cpu_present_to_apicid,
542 .apicid_to_cpu_present = numaq_apicid_to_cpu_present,
543 .setup_portio_remap = numaq_setup_portio_remap,
544 .check_phys_apicid_present = numaq_check_phys_apicid_present,
545 .enable_apic_mode = NULL,
546 .phys_pkg_id = numaq_phys_pkg_id,
547 .mps_oem_check = __numaq_mps_oem_check,
548
549 .get_apic_id = numaq_get_apic_id,
550 .set_apic_id = NULL,
551 .apic_id_mask = 0x0F << 24,
552
553 .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid,
554 .cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and,
555
556 .send_IPI_mask = numaq_send_IPI_mask,
557 .send_IPI_mask_allbutself = NULL,
558 .send_IPI_allbutself = numaq_send_IPI_allbutself,
559 .send_IPI_all = numaq_send_IPI_all,
560 .send_IPI_self = default_send_IPI_self,
561
562 .wakeup_cpu = NULL,
563 .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW,
564 .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH,
565
566	/* We don't do anything here because we use NMIs to boot instead */
567 .wait_for_init_deassert = NULL,
568
569 .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic,
570 .store_NMI_vector = numaq_store_NMI_vector,
571 .inquire_remote_apic = NULL,
572};
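
Taken together, the mapping helpers above encode a fixed quad layout: bits 7:4 of the logical APIC ID carry the quad (node) number, bits 3:0 carry a one-hot CPU bit within that quad. A stand-alone sketch of the round trip (helper names here are illustrative; only the bit arithmetic comes from numaq_cpu_present_to_apicid() and numaq_apicid_to_cpu_present() above):

    #include <stdio.h>

    static int cpu_present_to_apicid(int mps_cpu)
    {
            /* quad number in bits 7:4, one-hot CPU bit in bits 3:0 */
            return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
    }

    static int apicid_to_physid(int logical_apicid)
    {
            int node = logical_apicid >> 4;                   /* numaq_apicid_to_node() */
            int cpu = __builtin_ffs(logical_apicid & 0xf) - 1; /* __ffs() */

            return cpu + 4 * node;                            /* physid for this CPU */
    }

    int main(void)
    {
            int cpu;

            for (cpu = 0; cpu < 8; cpu++) {
                    int apicid = cpu_present_to_apicid(cpu);

                    /* e.g. cpu 5 -> quad 1, bit 1 -> apicid 0x12 -> physid 5 */
                    printf("cpu %d -> apicid 0x%02x -> physid %d\n",
                           cpu, apicid, apicid_to_physid(apicid));
            }
            return 0;
    }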
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 95777b0faa73..3a7c5a44082e 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -26,13 +26,3 @@ struct pv_lock_ops pv_lock_ops = {
26}; 26};
27EXPORT_SYMBOL(pv_lock_ops); 27EXPORT_SYMBOL(pv_lock_ops);
28 28
29void __init paravirt_use_bytelocks(void)
30{
31#ifdef CONFIG_SMP
32 pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
33 pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
34 pv_lock_ops.spin_lock = __byte_spin_lock;
35 pv_lock_ops.spin_trylock = __byte_spin_trylock;
36 pv_lock_ops.spin_unlock = __byte_spin_unlock;
37#endif
38}
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index e4c8fb608873..cea11c8e3049 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -44,6 +44,17 @@ void _paravirt_nop(void)
44{ 44{
45} 45}
46 46
47/* identity function, which can be inlined */
48u32 _paravirt_ident_32(u32 x)
49{
50 return x;
51}
52
53u64 _paravirt_ident_64(u64 x)
54{
55 return x;
56}
57
47static void __init default_banner(void) 58static void __init default_banner(void)
48{ 59{
49 printk(KERN_INFO "Booting paravirtualized kernel on %s\n", 60 printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
@@ -138,9 +149,16 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
138 if (opfunc == NULL) 149 if (opfunc == NULL)
139 /* If there's no function, patch it with a ud2a (BUG) */ 150 /* If there's no function, patch it with a ud2a (BUG) */
140 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a)); 151 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
141 else if (opfunc == paravirt_nop) 152 else if (opfunc == _paravirt_nop)
142 /* If the operation is a nop, then nop the callsite */ 153 /* If the operation is a nop, then nop the callsite */
143 ret = paravirt_patch_nop(); 154 ret = paravirt_patch_nop();
155
156 /* identity functions just return their single argument */
157 else if (opfunc == _paravirt_ident_32)
158 ret = paravirt_patch_ident_32(insnbuf, len);
159 else if (opfunc == _paravirt_ident_64)
160 ret = paravirt_patch_ident_64(insnbuf, len);
161
144 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || 162 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
145 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) || 163 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
146 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) || 164 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
@@ -292,10 +310,10 @@ struct pv_time_ops pv_time_ops = {
292 310
293struct pv_irq_ops pv_irq_ops = { 311struct pv_irq_ops pv_irq_ops = {
294 .init_IRQ = native_init_IRQ, 312 .init_IRQ = native_init_IRQ,
295 .save_fl = native_save_fl, 313 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
296 .restore_fl = native_restore_fl, 314 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
297 .irq_disable = native_irq_disable, 315 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
298 .irq_enable = native_irq_enable, 316 .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
299 .safe_halt = native_safe_halt, 317 .safe_halt = native_safe_halt,
300 .halt = native_halt, 318 .halt = native_halt,
301#ifdef CONFIG_X86_64 319#ifdef CONFIG_X86_64
@@ -373,6 +391,14 @@ struct pv_apic_ops pv_apic_ops = {
373#endif 391#endif
374}; 392};
375 393
394#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
395/* 32-bit pagetable entries */
396#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
397#else
398/* 64-bit pagetable entries */
399#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
400#endif
401
376struct pv_mmu_ops pv_mmu_ops = { 402struct pv_mmu_ops pv_mmu_ops = {
377#ifndef CONFIG_X86_64 403#ifndef CONFIG_X86_64
378 .pagetable_setup_start = native_pagetable_setup_start, 404 .pagetable_setup_start = native_pagetable_setup_start,
@@ -424,22 +450,23 @@ struct pv_mmu_ops pv_mmu_ops = {
424 .pmd_clear = native_pmd_clear, 450 .pmd_clear = native_pmd_clear,
425#endif 451#endif
426 .set_pud = native_set_pud, 452 .set_pud = native_set_pud,
427 .pmd_val = native_pmd_val, 453
428 .make_pmd = native_make_pmd, 454 .pmd_val = PTE_IDENT,
455 .make_pmd = PTE_IDENT,
429 456
430#if PAGETABLE_LEVELS == 4 457#if PAGETABLE_LEVELS == 4
431 .pud_val = native_pud_val, 458 .pud_val = PTE_IDENT,
432 .make_pud = native_make_pud, 459 .make_pud = PTE_IDENT,
460
433 .set_pgd = native_set_pgd, 461 .set_pgd = native_set_pgd,
434#endif 462#endif
435#endif /* PAGETABLE_LEVELS >= 3 */ 463#endif /* PAGETABLE_LEVELS >= 3 */
436 464
437 .pte_val = native_pte_val, 465 .pte_val = PTE_IDENT,
438 .pte_flags = native_pte_flags, 466 .pgd_val = PTE_IDENT,
439 .pgd_val = native_pgd_val,
440 467
441 .make_pte = native_make_pte, 468 .make_pte = PTE_IDENT,
442 .make_pgd = native_make_pgd, 469 .make_pgd = PTE_IDENT,
443 470
444 .dup_mmap = paravirt_nop, 471 .dup_mmap = paravirt_nop,
445 .exit_mmap = paravirt_nop, 472 .exit_mmap = paravirt_nop,
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 9fe644f4861d..d9f32e6d6ab6 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -12,6 +12,18 @@ DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
12DEF_NATIVE(pv_cpu_ops, clts, "clts"); 12DEF_NATIVE(pv_cpu_ops, clts, "clts");
13DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc"); 13DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
14 14
15unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
16{
17 /* arg in %eax, return in %eax */
18 return 0;
19}
20
21unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
22{
23 /* arg in %edx:%eax, return in %edx:%eax */
24 return 0;
25}
26
15unsigned native_patch(u8 type, u16 clobbers, void *ibuf, 27unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
16 unsigned long addr, unsigned len) 28 unsigned long addr, unsigned len)
17{ 29{
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 061d01df9ae6..3f08f34f93eb 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -19,6 +19,21 @@ DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
19DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl"); 19DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
20DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs"); 20DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
21 21
22DEF_NATIVE(, mov32, "mov %edi, %eax");
23DEF_NATIVE(, mov64, "mov %rdi, %rax");
24
25unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
26{
27 return paravirt_patch_insns(insnbuf, len,
28 start__mov32, end__mov32);
29}
30
31unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
32{
33 return paravirt_patch_insns(insnbuf, len,
34 start__mov64, end__mov64);
35}
36
22unsigned native_patch(u8 type, u16 clobbers, void *ibuf, 37unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
23 unsigned long addr, unsigned len) 38 unsigned long addr, unsigned len)
24{ 39{
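
The ident patchers above hand paravirt_patch_insns() a template delimited by the start__mov64/end__mov64 symbols that DEF_NATIVE emits around the literal instruction bytes. A user-space sketch of the copy step (an assumption-level simplification; the in-tree helper's handling of a too-long template may differ in detail):

    #include <stdio.h>
    #include <string.h>

    static unsigned patch_insns(void *insnbuf, unsigned len,
                                const unsigned char *start,
                                const unsigned char *end)
    {
            unsigned insn_len = end - start;

            if (start == NULL || insn_len > len)
                    return len;        /* leave the indirect call in place */
            memcpy(insnbuf, start, insn_len);  /* inline the native bytes */
            return insn_len;
    }

    int main(void)
    {
            /* mov %rdi, %rax -- the bytes behind DEF_NATIVE(, mov64, ...) */
            static const unsigned char mov64[] = { 0x48, 0x89, 0xf8 };
            unsigned char site[8] = { 0 };
            unsigned n = patch_insns(site, sizeof(site), mov64,
                                     mov64 + sizeof(mov64));

            printf("patched %u byte(s): %02x %02x %02x\n",
                   n, site[0], site[1], site[2]);
            return 0;
    }

On 64-bit, then, a pte_val()/make_pte() call site collapses to a single register move; the 32-bit variants return 0 above because the value is already in the right registers and nothing needs to be emitted.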
diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c
new file mode 100644
index 000000000000..22337b75de62
--- /dev/null
+++ b/arch/x86/kernel/probe_32.c
@@ -0,0 +1,411 @@
1/*
2 * Default generic APIC driver. This handles up to 8 CPUs.
3 *
4 * Copyright 2003 Andi Kleen, SuSE Labs.
5 * Subject to the GNU General Public License, v.2
6 *
7 * Generic x86 APIC driver probe layer.
8 */
9#include <linux/threads.h>
10#include <linux/cpumask.h>
11#include <linux/string.h>
12#include <linux/kernel.h>
13#include <linux/ctype.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16#include <asm/fixmap.h>
17#include <asm/mpspec.h>
18#include <asm/apicdef.h>
19#include <asm/genapic.h>
20#include <asm/setup.h>
21
22#include <linux/smp.h>
23#include <linux/interrupt.h>
24
25#include <asm/acpi.h>
26#include <asm/arch_hooks.h>
27#include <asm/e820.h>
28#include <asm/ipi.h>
44
45#ifdef CONFIG_HOTPLUG_CPU
46#define DEFAULT_SEND_IPI (1)
47#else
48#define DEFAULT_SEND_IPI (0)
49#endif
50
51int no_broadcast = DEFAULT_SEND_IPI;
52
53#ifdef CONFIG_X86_LOCAL_APIC
54
55static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
56{
57 /*
58 * Careful. Some cpus do not strictly honor the set of cpus
59 * specified in the interrupt destination when using lowest
60 * priority interrupt delivery mode.
61 *
62 * In particular there was a hyperthreading cpu observed to
63 * deliver interrupts to the wrong hyperthread when only one
64	 * hyperthread was specified in the interrupt destination.
65 */
66 *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
67}
68
69/* should be called last. */
70static int probe_default(void)
71{
72 return 1;
73}
74
75struct genapic apic_default = {
76
77 .name = "default",
78 .probe = probe_default,
79 .acpi_madt_oem_check = NULL,
80 .apic_id_registered = default_apic_id_registered,
81
82 .irq_delivery_mode = dest_LowestPrio,
83 /* logical delivery broadcast to all CPUs: */
84 .irq_dest_mode = 1,
85
86 .target_cpus = default_target_cpus,
87 .disable_esr = 0,
88 .dest_logical = APIC_DEST_LOGICAL,
89 .check_apicid_used = default_check_apicid_used,
90 .check_apicid_present = default_check_apicid_present,
91
92 .vector_allocation_domain = default_vector_allocation_domain,
93 .init_apic_ldr = default_init_apic_ldr,
94
95 .ioapic_phys_id_map = default_ioapic_phys_id_map,
96 .setup_apic_routing = default_setup_apic_routing,
97 .multi_timer_check = NULL,
98 .apicid_to_node = default_apicid_to_node,
99 .cpu_to_logical_apicid = default_cpu_to_logical_apicid,
100 .cpu_present_to_apicid = default_cpu_present_to_apicid,
101 .apicid_to_cpu_present = default_apicid_to_cpu_present,
102 .setup_portio_remap = NULL,
103 .check_phys_apicid_present = default_check_phys_apicid_present,
104 .enable_apic_mode = NULL,
105 .phys_pkg_id = default_phys_pkg_id,
106 .mps_oem_check = NULL,
107
108 .get_apic_id = default_get_apic_id,
109 .set_apic_id = NULL,
110 .apic_id_mask = 0x0F << 24,
111
112 .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
113 .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
114
115 .send_IPI_mask = default_send_IPI_mask_logical,
116 .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical,
117 .send_IPI_allbutself = default_send_IPI_allbutself,
118 .send_IPI_all = default_send_IPI_all,
119 .send_IPI_self = default_send_IPI_self,
120
121 .wakeup_cpu = NULL,
122 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
123 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
124
125 .wait_for_init_deassert = default_wait_for_init_deassert,
126
127 .smp_callin_clear_local_apic = NULL,
128 .store_NMI_vector = NULL,
129 .inquire_remote_apic = default_inquire_remote_apic,
130};
131
132extern struct genapic apic_numaq;
133extern struct genapic apic_summit;
134extern struct genapic apic_bigsmp;
135extern struct genapic apic_es7000;
136extern struct genapic apic_default;
137
138struct genapic *apic = &apic_default;
139
140static struct genapic *apic_probe[] __initdata = {
141#ifdef CONFIG_X86_NUMAQ
142 &apic_numaq,
143#endif
144#ifdef CONFIG_X86_SUMMIT
145 &apic_summit,
146#endif
147#ifdef CONFIG_X86_BIGSMP
148 &apic_bigsmp,
149#endif
150#ifdef CONFIG_X86_ES7000
151 &apic_es7000,
152#endif
153 &apic_default, /* must be last */
154 NULL,
155};
156
157static int cmdline_apic __initdata;
158static int __init parse_apic(char *arg)
159{
160 int i;
161
162 if (!arg)
163 return -EINVAL;
164
165 for (i = 0; apic_probe[i]; i++) {
166 if (!strcmp(apic_probe[i]->name, arg)) {
167 apic = apic_probe[i];
168 cmdline_apic = 1;
169 return 0;
170 }
171 }
172
173 if (x86_quirks->update_genapic)
174 x86_quirks->update_genapic();
175
176 /* Parsed again by __setup for debug/verbose */
177 return 0;
178}
179early_param("apic", parse_apic);
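
In practice the override is driven from the kernel command line; the accepted names are the .name strings of the drivers listed in apic_probe[], for example:

    apic=bigsmp    # force the >8-CPU physical-delivery driver
    apic=default   # keep the 8-CPU logical-delivery driver

An unmatched value still triggers the update_genapic() quirk and, per the comment above, is left for the later __setup() parse of the debug/verbose options.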
180
181void __init generic_bigsmp_probe(void)
182{
183#ifdef CONFIG_X86_BIGSMP
184 /*
185 * This routine is used to switch to bigsmp mode when
186 * - There is no apic= option specified by the user
187 * - generic_apic_probe() has chosen apic_default as the sub_arch
188	 * - we find more than 8 CPUs in the ACPI LAPIC listing with xAPIC support
189 */
190
191 if (!cmdline_apic && apic == &apic_default) {
192 if (apic_bigsmp.probe()) {
193 apic = &apic_bigsmp;
194 if (x86_quirks->update_genapic)
195 x86_quirks->update_genapic();
196 printk(KERN_INFO "Overriding APIC driver with %s\n",
197 apic->name);
198 }
199 }
200#endif
201}
202
203void __init generic_apic_probe(void)
204{
205 if (!cmdline_apic) {
206 int i;
207 for (i = 0; apic_probe[i]; i++) {
208 if (apic_probe[i]->probe()) {
209 apic = apic_probe[i];
210 break;
211 }
212 }
213 /* Not visible without early console */
214 if (!apic_probe[i])
215 panic("Didn't find an APIC driver");
216
217 if (x86_quirks->update_genapic)
218 x86_quirks->update_genapic();
219 }
220 printk(KERN_INFO "Using APIC driver %s\n", apic->name);
221}
222
223/* These functions can switch the APIC even after the initial ->probe() */
224
225int __init
226generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
227{
228 int i;
229
230 for (i = 0; apic_probe[i]; ++i) {
231 if (!apic_probe[i]->mps_oem_check)
232 continue;
233 if (!apic_probe[i]->mps_oem_check(mpc, oem, productid))
234 continue;
235
236 if (!cmdline_apic) {
237 apic = apic_probe[i];
238 if (x86_quirks->update_genapic)
239 x86_quirks->update_genapic();
240 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
241 apic->name);
242 }
243 return 1;
244 }
245 return 0;
246}
247
248int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
249{
250 int i;
251
252 for (i = 0; apic_probe[i]; ++i) {
253 if (!apic_probe[i]->acpi_madt_oem_check)
254 continue;
255 if (!apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id))
256 continue;
257
258 if (!cmdline_apic) {
259 apic = apic_probe[i];
260 if (x86_quirks->update_genapic)
261 x86_quirks->update_genapic();
262 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
263 apic->name);
264 }
265 return 1;
266 }
267 return 0;
268}
269
270#endif /* CONFIG_X86_LOCAL_APIC */
271
272/**
273 * pre_intr_init_hook - initialisation prior to setting up interrupt vectors
274 *
275 * Description:
276 * Perform any necessary interrupt initialisation prior to setting up
277 * the "ordinary" interrupt call gates. For legacy reasons, the ISA
278 * interrupts should be initialised here if the machine emulates a PC
279 * in any way.
280 **/
281void __init pre_intr_init_hook(void)
282{
283 if (x86_quirks->arch_pre_intr_init) {
284 if (x86_quirks->arch_pre_intr_init())
285 return;
286 }
287 init_ISA_irqs();
288}
289
290/**
291 * intr_init_hook - post gate setup interrupt initialisation
292 *
293 * Description:
294 * Fill in any interrupts that may have been left out by the general
295 *	init_IRQ() routine. Interrupts having to do with the machine rather
296 *	than the devices on the I/O bus (like APIC interrupts in Intel MP
297 * systems) are started here.
298 **/
299void __init intr_init_hook(void)
300{
301 if (x86_quirks->arch_intr_init) {
302 if (x86_quirks->arch_intr_init())
303 return;
304 }
305}
306
307/**
308 * pre_setup_arch_hook - hook called prior to any setup_arch() execution
309 *
310 * Description:
311 *	Generally used to activate any machine-specific identification
312 * routines that may be needed before setup_arch() runs. On Voyager
313 * this is used to get the board revision and type.
314 **/
315void __init pre_setup_arch_hook(void)
316{
317}
318
319/**
320 * trap_init_hook - initialise system specific traps
321 *
322 * Description:
323 * Called as the final act of trap_init(). Used in VISWS to initialise
324 * the various board specific APIC traps.
325 **/
326void __init trap_init_hook(void)
327{
328 if (x86_quirks->arch_trap_init) {
329 if (x86_quirks->arch_trap_init())
330 return;
331 }
332}
333
334static struct irqaction irq0 = {
335 .handler = timer_interrupt,
336 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL,
337 .mask = CPU_MASK_NONE,
338 .name = "timer"
339};
340
341/**
342 * pre_time_init_hook - do any specific initialisations before time_init().
343 *
344 **/
345void __init pre_time_init_hook(void)
346{
347 if (x86_quirks->arch_pre_time_init)
348 x86_quirks->arch_pre_time_init();
349}
350
351/**
352 * time_init_hook - do any specific initialisations for the system timer.
353 *
354 * Description:
355 * Must plug the system timer interrupt source at HZ into the IRQ listed
356 * in irq_vectors.h:TIMER_IRQ
357 **/
358void __init time_init_hook(void)
359{
360 if (x86_quirks->arch_time_init) {
361 /*
362		 * A nonzero return code does not mean failure; it means
363 * that the architecture quirk does not want any
364 * generic (timer) setup to be performed after this:
365 */
366 if (x86_quirks->arch_time_init())
367 return;
368 }
369
370 irq0.mask = cpumask_of_cpu(0);
371 setup_irq(0, &irq0);
372}
373
374#ifdef CONFIG_MCA
375/**
376 * mca_nmi_hook - hook into MCA specific NMI chain
377 *
378 * Description:
379 * The MCA (Microchannel Architecture) has an NMI chain for NMI sources
380 * along the MCA bus. Use this to hook into that chain if you will need
381 * it.
382 **/
383void mca_nmi_hook(void)
384{
385 /*
386 * If I recall correctly, there's a whole bunch of other things that
387 * we can do to check for NMI problems, but that's all I know about
388 * at the moment.
389 */
390 pr_warning("NMI generated from unknown source!\n");
391}
392#endif
393
394static __init int no_ipi_broadcast(char *str)
395{
396 get_option(&str, &no_broadcast);
397 pr_info("Using %s mode\n",
398 no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
399 return 1;
400}
401__setup("no_ipi_broadcast=", no_ipi_broadcast);
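
A matching boot-time usage sketch for this switch:

    no_ipi_broadcast=1    # mask-based IPIs only; the default with CPU hotplug
    no_ipi_broadcast=0    # allow shortcut broadcasts

as reflected by DEFAULT_SEND_IPI above and the print_ipi_mode() report below.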
402
403static int __init print_ipi_mode(void)
404{
405 pr_info("Using IPI %s mode\n",
406 no_broadcast ? "No-Shortcut" : "Shortcut");
407 return 0;
408}
409
410late_initcall(print_ipi_mode);
411
diff --git a/arch/x86/kernel/probe_roms_32.c b/arch/x86/kernel/probe_roms_32.c
index 675a48c404a5..071e7fea42e5 100644
--- a/arch/x86/kernel/probe_roms_32.c
+++ b/arch/x86/kernel/probe_roms_32.c
@@ -18,7 +18,7 @@
18#include <asm/setup.h> 18#include <asm/setup.h>
19#include <asm/sections.h> 19#include <asm/sections.h>
20#include <asm/io.h> 20#include <asm/io.h>
21#include <setup_arch.h> 21#include <asm/setup_arch.h>
22 22
23static struct resource system_rom_resource = { 23static struct resource system_rom_resource = {
24 .name = "System ROM", 24 .name = "System ROM",
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6d12f7e37f8c..87b69d4fac16 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -350,7 +350,7 @@ static void c1e_idle(void)
350 350
351void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) 351void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
352{ 352{
353#ifdef CONFIG_X86_SMP 353#ifdef CONFIG_SMP
354 if (pm_idle == poll_idle && smp_num_siblings > 1) { 354 if (pm_idle == poll_idle && smp_num_siblings > 1) {
355 printk(KERN_WARNING "WARNING: polling idle and HT enabled," 355 printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
356 " performance may degrade.\n"); 356 " performance may degrade.\n");
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index a546f55c77b4..fec79ad85dc6 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -11,6 +11,7 @@
11 11
12#include <stdarg.h> 12#include <stdarg.h>
13 13
14#include <linux/stackprotector.h>
14#include <linux/cpu.h> 15#include <linux/cpu.h>
15#include <linux/errno.h> 16#include <linux/errno.h>
16#include <linux/sched.h> 17#include <linux/sched.h>
@@ -66,9 +67,6 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
66DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; 67DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
67EXPORT_PER_CPU_SYMBOL(current_task); 68EXPORT_PER_CPU_SYMBOL(current_task);
68 69
69DEFINE_PER_CPU(int, cpu_number);
70EXPORT_PER_CPU_SYMBOL(cpu_number);
71
72/* 70/*
73 * Return saved PC of a blocked thread. 71 * Return saved PC of a blocked thread.
74 */ 72 */
@@ -94,6 +92,15 @@ void cpu_idle(void)
94{ 92{
95 int cpu = smp_processor_id(); 93 int cpu = smp_processor_id();
96 94
95 /*
96 * If we're the non-boot CPU, nothing set the stack canary up
97 * for us. CPU0 already has it initialized but no harm in
98 * doing it again. This is a good place for updating it, as
99	 * we won't ever return from this function (so the invalid
100	 * canaries already on the stack won't ever trigger).
101 */
102 boot_init_stack_canary();
103
97 current_thread_info()->status |= TS_POLLING; 104 current_thread_info()->status |= TS_POLLING;
98 105
99 /* endless idle loop with no priority at all */ 106 /* endless idle loop with no priority at all */
@@ -111,7 +118,6 @@ void cpu_idle(void)
111 play_dead(); 118 play_dead();
112 119
113 local_irq_disable(); 120 local_irq_disable();
114 __get_cpu_var(irq_stat).idle_timestamp = jiffies;
115 /* Don't trace irqs off for idle */ 121 /* Don't trace irqs off for idle */
116 stop_critical_timings(); 122 stop_critical_timings();
117 pm_idle(); 123 pm_idle();
@@ -135,7 +141,7 @@ void __show_regs(struct pt_regs *regs, int all)
135 if (user_mode_vm(regs)) { 141 if (user_mode_vm(regs)) {
136 sp = regs->sp; 142 sp = regs->sp;
137 ss = regs->ss & 0xffff; 143 ss = regs->ss & 0xffff;
138 savesegment(gs, gs); 144 gs = get_user_gs(regs);
139 } else { 145 } else {
140 sp = (unsigned long) (&regs->sp); 146 sp = (unsigned long) (&regs->sp);
141 savesegment(ss, ss); 147 savesegment(ss, ss);
@@ -216,6 +222,7 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
216 regs.ds = __USER_DS; 222 regs.ds = __USER_DS;
217 regs.es = __USER_DS; 223 regs.es = __USER_DS;
218 regs.fs = __KERNEL_PERCPU; 224 regs.fs = __KERNEL_PERCPU;
225 regs.gs = __KERNEL_STACK_CANARY;
219 regs.orig_ax = -1; 226 regs.orig_ax = -1;
220 regs.ip = (unsigned long) kernel_thread_helper; 227 regs.ip = (unsigned long) kernel_thread_helper;
221 regs.cs = __KERNEL_CS | get_kernel_rpl(); 228 regs.cs = __KERNEL_CS | get_kernel_rpl();
@@ -308,7 +315,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
308 315
309 p->thread.ip = (unsigned long) ret_from_fork; 316 p->thread.ip = (unsigned long) ret_from_fork;
310 317
311 savesegment(gs, p->thread.gs); 318 task_user_gs(p) = get_user_gs(regs);
312 319
313 tsk = current; 320 tsk = current;
314 if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { 321 if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
@@ -346,7 +353,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
346void 353void
347start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) 354start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
348{ 355{
349 __asm__("movl %0, %%gs" : : "r"(0)); 356 set_user_gs(regs, 0);
350 regs->fs = 0; 357 regs->fs = 0;
351 set_fs(USER_DS); 358 set_fs(USER_DS);
352 regs->ds = __USER_DS; 359 regs->ds = __USER_DS;
@@ -543,7 +550,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
543 * used %fs or %gs (it does not today), or if the kernel is 550 * used %fs or %gs (it does not today), or if the kernel is
544 * running inside of a hypervisor layer. 551 * running inside of a hypervisor layer.
545 */ 552 */
546 savesegment(gs, prev->gs); 553 lazy_save_gs(prev->gs);
547 554
548 /* 555 /*
549 * Load the per-thread Thread-Local Storage descriptor. 556 * Load the per-thread Thread-Local Storage descriptor.
@@ -589,31 +596,31 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
589 * Restore %gs if needed (which is common) 596 * Restore %gs if needed (which is common)
590 */ 597 */
591 if (prev->gs | next->gs) 598 if (prev->gs | next->gs)
592 loadsegment(gs, next->gs); 599 lazy_load_gs(next->gs);
593 600
594 x86_write_percpu(current_task, next_p); 601 percpu_write(current_task, next_p);
595 602
596 return prev_p; 603 return prev_p;
597} 604}
598 605
599asmlinkage int sys_fork(struct pt_regs regs) 606int sys_fork(struct pt_regs *regs)
600{ 607{
601 return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL); 608 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
602} 609}
603 610
604asmlinkage int sys_clone(struct pt_regs regs) 611int sys_clone(struct pt_regs *regs)
605{ 612{
606 unsigned long clone_flags; 613 unsigned long clone_flags;
607 unsigned long newsp; 614 unsigned long newsp;
608 int __user *parent_tidptr, *child_tidptr; 615 int __user *parent_tidptr, *child_tidptr;
609 616
610 clone_flags = regs.bx; 617 clone_flags = regs->bx;
611 newsp = regs.cx; 618 newsp = regs->cx;
612 parent_tidptr = (int __user *)regs.dx; 619 parent_tidptr = (int __user *)regs->dx;
613 child_tidptr = (int __user *)regs.di; 620 child_tidptr = (int __user *)regs->di;
614 if (!newsp) 621 if (!newsp)
615 newsp = regs.sp; 622 newsp = regs->sp;
616 return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr); 623 return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
617} 624}
618 625
619/* 626/*
@@ -626,27 +633,27 @@ asmlinkage int sys_clone(struct pt_regs regs)
626 * do not have enough call-clobbered registers to hold all 633 * do not have enough call-clobbered registers to hold all
627 * the information you need. 634 * the information you need.
628 */ 635 */
629asmlinkage int sys_vfork(struct pt_regs regs) 636int sys_vfork(struct pt_regs *regs)
630{ 637{
631 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL); 638 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, NULL, NULL);
632} 639}
633 640
634/* 641/*
635 * sys_execve() executes a new program. 642 * sys_execve() executes a new program.
636 */ 643 */
637asmlinkage int sys_execve(struct pt_regs regs) 644int sys_execve(struct pt_regs *regs)
638{ 645{
639 int error; 646 int error;
640 char *filename; 647 char *filename;
641 648
642 filename = getname((char __user *) regs.bx); 649 filename = getname((char __user *) regs->bx);
643 error = PTR_ERR(filename); 650 error = PTR_ERR(filename);
644 if (IS_ERR(filename)) 651 if (IS_ERR(filename))
645 goto out; 652 goto out;
646 error = do_execve(filename, 653 error = do_execve(filename,
647 (char __user * __user *) regs.cx, 654 (char __user * __user *) regs->cx,
648 (char __user * __user *) regs.dx, 655 (char __user * __user *) regs->dx,
649 &regs); 656 regs);
650 if (error == 0) { 657 if (error == 0) {
651 /* Make sure we don't return using sysenter.. */ 658 /* Make sure we don't return using sysenter.. */
652 set_thread_flag(TIF_IRET); 659 set_thread_flag(TIF_IRET);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 85b4cb5c1980..836ef6575f01 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -16,6 +16,7 @@
16 16
17#include <stdarg.h> 17#include <stdarg.h>
18 18
19#include <linux/stackprotector.h>
19#include <linux/cpu.h> 20#include <linux/cpu.h>
20#include <linux/errno.h> 21#include <linux/errno.h>
21#include <linux/sched.h> 22#include <linux/sched.h>
@@ -47,7 +48,6 @@
47#include <asm/processor.h> 48#include <asm/processor.h>
48#include <asm/i387.h> 49#include <asm/i387.h>
49#include <asm/mmu_context.h> 50#include <asm/mmu_context.h>
50#include <asm/pda.h>
51#include <asm/prctl.h> 51#include <asm/prctl.h>
52#include <asm/desc.h> 52#include <asm/desc.h>
53#include <asm/proto.h> 53#include <asm/proto.h>
@@ -58,6 +58,12 @@
58 58
59asmlinkage extern void ret_from_fork(void); 59asmlinkage extern void ret_from_fork(void);
60 60
61DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
62EXPORT_PER_CPU_SYMBOL(current_task);
63
64DEFINE_PER_CPU(unsigned long, old_rsp);
65static DEFINE_PER_CPU(unsigned char, is_idle);
66
61unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED; 67unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
62 68
63static ATOMIC_NOTIFIER_HEAD(idle_notifier); 69static ATOMIC_NOTIFIER_HEAD(idle_notifier);
@@ -76,13 +82,13 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
76 82
77void enter_idle(void) 83void enter_idle(void)
78{ 84{
79 write_pda(isidle, 1); 85 percpu_write(is_idle, 1);
80 atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL); 86 atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
81} 87}
82 88
83static void __exit_idle(void) 89static void __exit_idle(void)
84{ 90{
85 if (test_and_clear_bit_pda(0, isidle) == 0) 91 if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
86 return; 92 return;
87 atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL); 93 atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
88} 94}
@@ -112,6 +118,16 @@ static inline void play_dead(void)
112void cpu_idle(void) 118void cpu_idle(void)
113{ 119{
114 current_thread_info()->status |= TS_POLLING; 120 current_thread_info()->status |= TS_POLLING;
121
122 /*
123 * If we're the non-boot CPU, nothing set the stack canary up
124 * for us. CPU0 already has it initialized but no harm in
125 * doing it again. This is a good place for updating it, as
126	 * we won't ever return from this function (so the invalid
127	 * canaries already on the stack won't ever trigger).
128 */
129 boot_init_stack_canary();
130
115 /* endless idle loop with no priority at all */ 131 /* endless idle loop with no priority at all */
116 while (1) { 132 while (1) {
117 tick_nohz_stop_sched_tick(1); 133 tick_nohz_stop_sched_tick(1);
@@ -397,7 +413,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
397 load_gs_index(0); 413 load_gs_index(0);
398 regs->ip = new_ip; 414 regs->ip = new_ip;
399 regs->sp = new_sp; 415 regs->sp = new_sp;
400 write_pda(oldrsp, new_sp); 416 percpu_write(old_rsp, new_sp);
401 regs->cs = __USER_CS; 417 regs->cs = __USER_CS;
402 regs->ss = __USER_DS; 418 regs->ss = __USER_DS;
403 regs->flags = 0x200; 419 regs->flags = 0x200;
@@ -618,21 +634,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
618 /* 634 /*
619 * Switch the PDA and FPU contexts. 635 * Switch the PDA and FPU contexts.
620 */ 636 */
621 prev->usersp = read_pda(oldrsp); 637 prev->usersp = percpu_read(old_rsp);
622 write_pda(oldrsp, next->usersp); 638 percpu_write(old_rsp, next->usersp);
623 write_pda(pcurrent, next_p); 639 percpu_write(current_task, next_p);
624 640
625 write_pda(kernelstack, 641 percpu_write(kernel_stack,
626 (unsigned long)task_stack_page(next_p) + 642 (unsigned long)task_stack_page(next_p) +
627 THREAD_SIZE - PDA_STACKOFFSET); 643 THREAD_SIZE - KERNEL_STACK_OFFSET);
628#ifdef CONFIG_CC_STACKPROTECTOR
629 write_pda(stack_canary, next_p->stack_canary);
630 /*
631 * Build time only check to make sure the stack_canary is at
632 * offset 40 in the pda; this is a gcc ABI requirement
633 */
634 BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
635#endif
636 644
637 /* 645 /*
638 * Now maybe reload the debug registers and handle I/O bitmaps 646 * Now maybe reload the debug registers and handle I/O bitmaps
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 0a5df5f82fb9..7ec39ab37a2d 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -75,10 +75,7 @@ static inline bool invalid_selector(u16 value)
75static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) 75static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
76{ 76{
77 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); 77 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
78 regno >>= 2; 78 return &regs->bx + (regno >> 2);
79 if (regno > FS)
80 --regno;
81 return &regs->bx + regno;
82} 79}
83 80
84static u16 get_segment_reg(struct task_struct *task, unsigned long offset) 81static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
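
The simplification works because, with gs now stored in pt_regs (see the process_32.c changes below), every 4-byte ptrace offset lines up one-to-one with a pt_regs slot, so the old "skip the missing gs slot" adjustment can go. A 32-bit toy model of the indexing (names hypothetical; 4-byte slots as on i386):

    #include <stddef.h>
    #include <stdio.h>

    struct toy_regs {
            unsigned int bx, cx, dx, si, di;
    };

    static unsigned int *toy_access(struct toy_regs *regs, unsigned long regno)
    {
            /* offset in bytes >> 2 == slot index, exactly as above */
            return &regs->bx + (regno >> 2);
    }

    int main(void)
    {
            struct toy_regs r = { 10, 11, 12, 13, 14 };

            /* offsetof(dx) == 8 -> slot 2 -> prints 12 */
            printf("%u\n", *toy_access(&r, offsetof(struct toy_regs, dx)));
            return 0;
    }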
@@ -90,9 +87,10 @@ static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
90 if (offset != offsetof(struct user_regs_struct, gs)) 87 if (offset != offsetof(struct user_regs_struct, gs))
91 retval = *pt_regs_access(task_pt_regs(task), offset); 88 retval = *pt_regs_access(task_pt_regs(task), offset);
92 else { 89 else {
93 retval = task->thread.gs;
94 if (task == current) 90 if (task == current)
95 savesegment(gs, retval); 91 retval = get_user_gs(task_pt_regs(task));
92 else
93 retval = task_user_gs(task);
96 } 94 }
97 return retval; 95 return retval;
98} 96}
@@ -126,13 +124,10 @@ static int set_segment_reg(struct task_struct *task,
126 break; 124 break;
127 125
128 case offsetof(struct user_regs_struct, gs): 126 case offsetof(struct user_regs_struct, gs):
129 task->thread.gs = value;
130 if (task == current) 127 if (task == current)
131 /* 128 set_user_gs(task_pt_regs(task), value);
132 * The user-mode %gs is not affected by 129 else
133 * kernel entry, so we must update the CPU. 130 task_user_gs(task) = value;
134 */
135 loadsegment(gs, value);
136 } 131 }
137 132
138 return 0; 133 return 0;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 2b46eb41643b..32e8f0af292c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -14,6 +14,7 @@
14#include <asm/reboot.h> 14#include <asm/reboot.h>
15#include <asm/pci_x86.h> 15#include <asm/pci_x86.h>
16#include <asm/virtext.h> 16#include <asm/virtext.h>
17#include <asm/cpu.h>
17 18
18#ifdef CONFIG_X86_32 19#ifdef CONFIG_X86_32
19# include <linux/dmi.h> 20# include <linux/dmi.h>
@@ -23,7 +24,7 @@
23# include <asm/iommu.h> 24# include <asm/iommu.h>
24#endif 25#endif
25 26
26#include <mach_ipi.h> 27#include <asm/genapic.h>
27 28
28/* 29/*
29 * Power off function, if any 30 * Power off function, if any
@@ -650,7 +651,7 @@ static int crash_nmi_callback(struct notifier_block *self,
650 651
651static void smp_send_nmi_allbutself(void) 652static void smp_send_nmi_allbutself(void)
652{ 653{
653 send_IPI_allbutself(NMI_VECTOR); 654 apic->send_IPI_allbutself(NMI_VECTOR);
654} 655}
655 656
656static struct notifier_block crash_nmi_nb = { 657static struct notifier_block crash_nmi_nb = {
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index f5afe665a82b..b0bbdd4829c9 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -29,122 +29,6 @@ relocate_kernel:
29 * %rdx start address 29 * %rdx start address
30 */ 30 */
31 31
32 /* map the control page at its virtual address */
33
34 movq $0x0000ff8000000000, %r10 /* mask */
35 mov $(39 - 3), %cl /* bits to shift */
36 movq PTR(VA_CONTROL_PAGE)(%rsi), %r11 /* address to map */
37
38 movq %r11, %r9
39 andq %r10, %r9
40 shrq %cl, %r9
41
42 movq PTR(VA_PGD)(%rsi), %r8
43 addq %r8, %r9
44 movq PTR(PA_PUD_0)(%rsi), %r8
45 orq $PAGE_ATTR, %r8
46 movq %r8, (%r9)
47
48 shrq $9, %r10
49 sub $9, %cl
50
51 movq %r11, %r9
52 andq %r10, %r9
53 shrq %cl, %r9
54
55 movq PTR(VA_PUD_0)(%rsi), %r8
56 addq %r8, %r9
57 movq PTR(PA_PMD_0)(%rsi), %r8
58 orq $PAGE_ATTR, %r8
59 movq %r8, (%r9)
60
61 shrq $9, %r10
62 sub $9, %cl
63
64 movq %r11, %r9
65 andq %r10, %r9
66 shrq %cl, %r9
67
68 movq PTR(VA_PMD_0)(%rsi), %r8
69 addq %r8, %r9
70 movq PTR(PA_PTE_0)(%rsi), %r8
71 orq $PAGE_ATTR, %r8
72 movq %r8, (%r9)
73
74 shrq $9, %r10
75 sub $9, %cl
76
77 movq %r11, %r9
78 andq %r10, %r9
79 shrq %cl, %r9
80
81 movq PTR(VA_PTE_0)(%rsi), %r8
82 addq %r8, %r9
83 movq PTR(PA_CONTROL_PAGE)(%rsi), %r8
84 orq $PAGE_ATTR, %r8
85 movq %r8, (%r9)
86
87 /* identity map the control page at its physical address */
88
89 movq $0x0000ff8000000000, %r10 /* mask */
90 mov $(39 - 3), %cl /* bits to shift */
91 movq PTR(PA_CONTROL_PAGE)(%rsi), %r11 /* address to map */
92
93 movq %r11, %r9
94 andq %r10, %r9
95 shrq %cl, %r9
96
97 movq PTR(VA_PGD)(%rsi), %r8
98 addq %r8, %r9
99 movq PTR(PA_PUD_1)(%rsi), %r8
100 orq $PAGE_ATTR, %r8
101 movq %r8, (%r9)
102
103 shrq $9, %r10
104 sub $9, %cl
105
106 movq %r11, %r9
107 andq %r10, %r9
108 shrq %cl, %r9
109
110 movq PTR(VA_PUD_1)(%rsi), %r8
111 addq %r8, %r9
112 movq PTR(PA_PMD_1)(%rsi), %r8
113 orq $PAGE_ATTR, %r8
114 movq %r8, (%r9)
115
116 shrq $9, %r10
117 sub $9, %cl
118
119 movq %r11, %r9
120 andq %r10, %r9
121 shrq %cl, %r9
122
123 movq PTR(VA_PMD_1)(%rsi), %r8
124 addq %r8, %r9
125 movq PTR(PA_PTE_1)(%rsi), %r8
126 orq $PAGE_ATTR, %r8
127 movq %r8, (%r9)
128
129 shrq $9, %r10
130 sub $9, %cl
131
132 movq %r11, %r9
133 andq %r10, %r9
134 shrq %cl, %r9
135
136 movq PTR(VA_PTE_1)(%rsi), %r8
137 addq %r8, %r9
138 movq PTR(PA_CONTROL_PAGE)(%rsi), %r8
139 orq $PAGE_ATTR, %r8
140 movq %r8, (%r9)
141
142relocate_new_kernel:
143 /* %rdi indirection_page
144 * %rsi page_list
145 * %rdx start address
146 */
147
148 /* zero out flags, and disable interrupts */ 32 /* zero out flags, and disable interrupts */
149 pushq $0 33 pushq $0
150 popfq 34 popfq
@@ -156,9 +40,8 @@ relocate_new_kernel:
156 /* get physical address of page table now too */ 40 /* get physical address of page table now too */
157 movq PTR(PA_TABLE_PAGE)(%rsi), %rcx 41 movq PTR(PA_TABLE_PAGE)(%rsi), %rcx
158 42
159 /* switch to new set of page tables */ 43 /* Switch to the identity mapped page tables */
160 movq PTR(PA_PGD)(%rsi), %r9 44 movq %rcx, %cr3
161 movq %r9, %cr3
162 45
163 /* setup a new stack at the end of the physical control page */ 46 /* setup a new stack at the end of the physical control page */
164 lea PAGE_SIZE(%r8), %rsp 47 lea PAGE_SIZE(%r8), %rsp
@@ -194,9 +77,7 @@ identity_mapped:
194 jmp 1f 77 jmp 1f
1951: 781:
196 79
197 /* Switch to the identity mapped page tables, 80 /* Flush the TLB (needed?) */
198 * and flush the TLB.
199 */
200 movq %rcx, %cr3 81 movq %rcx, %cr3
201 82
202 /* Do the copies */ 83 /* Do the copies */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c461f6d69074..8fce6c714514 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -81,7 +81,7 @@
81#include <asm/io_apic.h> 81#include <asm/io_apic.h>
82#include <asm/ist.h> 82#include <asm/ist.h>
83#include <asm/vmi.h> 83#include <asm/vmi.h>
84#include <setup_arch.h> 84#include <asm/setup_arch.h>
85#include <asm/bios_ebda.h> 85#include <asm/bios_ebda.h>
86#include <asm/cacheflush.h> 86#include <asm/cacheflush.h>
87#include <asm/processor.h> 87#include <asm/processor.h>
@@ -89,7 +89,7 @@
89 89
90#include <asm/system.h> 90#include <asm/system.h>
91#include <asm/vsyscall.h> 91#include <asm/vsyscall.h>
92#include <asm/smp.h> 92#include <asm/cpu.h>
93#include <asm/desc.h> 93#include <asm/desc.h>
94#include <asm/dma.h> 94#include <asm/dma.h>
95#include <asm/iommu.h> 95#include <asm/iommu.h>
@@ -97,7 +97,7 @@
97#include <asm/mmu_context.h> 97#include <asm/mmu_context.h>
98#include <asm/proto.h> 98#include <asm/proto.h>
99 99
100#include <mach_apic.h> 100#include <asm/genapic.h>
101#include <asm/paravirt.h> 101#include <asm/paravirt.h>
102#include <asm/hypervisor.h> 102#include <asm/hypervisor.h>
103 103
@@ -112,6 +112,20 @@
112#define ARCH_SETUP 112#define ARCH_SETUP
113#endif 113#endif
114 114
115unsigned int boot_cpu_id __read_mostly;
116
117#ifdef CONFIG_X86_64
118int default_cpu_present_to_apicid(int mps_cpu)
119{
120 return __default_cpu_present_to_apicid(mps_cpu);
121}
122
123int default_check_phys_apicid_present(int boot_cpu_physical_apicid)
124{
125 return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
126}
127#endif
128
115#ifndef CONFIG_DEBUG_BOOT_PARAMS 129#ifndef CONFIG_DEBUG_BOOT_PARAMS
116struct boot_params __initdata boot_params; 130struct boot_params __initdata boot_params;
117#else 131#else
@@ -588,10 +602,9 @@ early_param("elfcorehdr", setup_elfcorehdr);
588 602
589static int __init default_update_genapic(void) 603static int __init default_update_genapic(void)
590{ 604{
591#ifdef CONFIG_X86_SMP 605#ifdef CONFIG_SMP
592# if defined(CONFIG_X86_GENERICARCH) || defined(CONFIG_X86_64) 606 if (!apic->wakeup_cpu)
593 genapic->wakeup_cpu = wakeup_secondary_cpu_via_init; 607 apic->wakeup_cpu = wakeup_secondary_cpu_via_init;
594# endif
595#endif 608#endif
596 609
597 return 0; 610 return 0;
@@ -892,12 +905,11 @@ void __init setup_arch(char **cmdline_p)
892 */ 905 */
893 acpi_reserve_bootmem(); 906 acpi_reserve_bootmem();
894#endif 907#endif
895#ifdef CONFIG_X86_FIND_SMP_CONFIG
896 /* 908 /*
897 * Find and reserve possible boot-time SMP configuration: 909 * Find and reserve possible boot-time SMP configuration:
898 */ 910 */
899 find_smp_config(); 911 find_smp_config();
900#endif 912
901 reserve_crashkernel(); 913 reserve_crashkernel();
902 914
903#ifdef CONFIG_X86_64 915#ifdef CONFIG_X86_64
@@ -924,9 +936,7 @@ void __init setup_arch(char **cmdline_p)
924 map_vsyscall(); 936 map_vsyscall();
925#endif 937#endif
926 938
927#ifdef CONFIG_X86_GENERICARCH
928 generic_apic_probe(); 939 generic_apic_probe();
929#endif
930 940
931 early_quirks(); 941 early_quirks();
932 942
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 01161077a49c..d992e6cff730 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -13,145 +13,47 @@
13#include <asm/mpspec.h> 13#include <asm/mpspec.h>
14#include <asm/apicdef.h> 14#include <asm/apicdef.h>
15#include <asm/highmem.h> 15#include <asm/highmem.h>
16#include <asm/proto.h>
17#include <asm/cpumask.h>
18#include <asm/cpu.h>
19#include <asm/stackprotector.h>
16 20
17#ifdef CONFIG_X86_LOCAL_APIC 21#ifdef CONFIG_DEBUG_PER_CPU_MAPS
18unsigned int num_processors; 22# define DBG(x...) printk(KERN_DEBUG x)
19unsigned disabled_cpus __cpuinitdata;
20/* Processor that is doing the boot up */
21unsigned int boot_cpu_physical_apicid = -1U;
22EXPORT_SYMBOL(boot_cpu_physical_apicid);
23unsigned int max_physical_apicid;
24
25/* Bitmask of physically existing CPUs */
26physid_mask_t phys_cpu_present_map;
27#endif
28
29/* map cpu index to physical APIC ID */
30DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
31DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
32EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
33EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
34
35#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
36#define X86_64_NUMA 1
37
38/* map cpu index to node index */
39DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
40EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
41
42/* which logical CPUs are on which nodes */
43cpumask_t *node_to_cpumask_map;
44EXPORT_SYMBOL(node_to_cpumask_map);
45
46/* setup node_to_cpumask_map */
47static void __init setup_node_to_cpumask_map(void);
48
49#else 23#else
50static inline void setup_node_to_cpumask_map(void) { } 24# define DBG(x...)
51#endif 25#endif
52 26
53#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP) 27DEFINE_PER_CPU(int, cpu_number);
54/* 28EXPORT_PER_CPU_SYMBOL(cpu_number);
55 * Copy data used in early init routines from the initial arrays to the
56 * per cpu data areas. These arrays then become expendable and the
57 * *_early_ptr's are zeroed indicating that the static arrays are gone.
58 */
59static void __init setup_per_cpu_maps(void)
60{
61 int cpu;
62 29
63 for_each_possible_cpu(cpu) { 30#ifdef CONFIG_X86_64
64 per_cpu(x86_cpu_to_apicid, cpu) = 31#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
65 early_per_cpu_map(x86_cpu_to_apicid, cpu); 32#else
66 per_cpu(x86_bios_cpu_apicid, cpu) = 33#define BOOT_PERCPU_OFFSET 0
67 early_per_cpu_map(x86_bios_cpu_apicid, cpu);
68#ifdef X86_64_NUMA
69 per_cpu(x86_cpu_to_node_map, cpu) =
70 early_per_cpu_map(x86_cpu_to_node_map, cpu);
71#endif 34#endif
72 }
73 35
74 /* indicate the early static arrays will soon be gone */ 36DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
75 early_per_cpu_ptr(x86_cpu_to_apicid) = NULL; 37EXPORT_PER_CPU_SYMBOL(this_cpu_off);
76 early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
77#ifdef X86_64_NUMA
78 early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
79#endif
80}
81 38
82#ifdef CONFIG_X86_32 39unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
83/* 40 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
84 * Great future not-so-futuristic plan: make i386 and x86_64 do it 41};
85 * the same way
86 */
87unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
88EXPORT_SYMBOL(__per_cpu_offset); 42EXPORT_SYMBOL(__per_cpu_offset);
89static inline void setup_cpu_pda_map(void) { }
90
91#elif !defined(CONFIG_SMP)
92static inline void setup_cpu_pda_map(void) { }
93
94#else /* CONFIG_SMP && CONFIG_X86_64 */
95
96/*
97 * Allocate cpu_pda pointer table and array via alloc_bootmem.
98 */
99static void __init setup_cpu_pda_map(void)
100{
101 char *pda;
102 struct x8664_pda **new_cpu_pda;
103 unsigned long size;
104 int cpu;
105
106 size = roundup(sizeof(struct x8664_pda), cache_line_size());
107
108 /* allocate cpu_pda array and pointer table */
109 {
110 unsigned long tsize = nr_cpu_ids * sizeof(void *);
111 unsigned long asize = size * (nr_cpu_ids - 1);
112 43
113 tsize = roundup(tsize, cache_line_size()); 44static inline void setup_percpu_segment(int cpu)
114 new_cpu_pda = alloc_bootmem(tsize + asize);
115 pda = (char *)new_cpu_pda + tsize;
116 }
117
118 /* initialize pointer table to static pda's */
119 for_each_possible_cpu(cpu) {
120 if (cpu == 0) {
121 /* leave boot cpu pda in place */
122 new_cpu_pda[0] = cpu_pda(0);
123 continue;
124 }
125 new_cpu_pda[cpu] = (struct x8664_pda *)pda;
126 new_cpu_pda[cpu]->in_bootmem = 1;
127 pda += size;
128 }
129
130 /* point to new pointer table */
131 _cpu_pda = new_cpu_pda;
132}
133
134#endif /* CONFIG_SMP && CONFIG_X86_64 */
135
136#ifdef CONFIG_X86_64
137
138/* correctly size the local cpu masks */
139static void __init setup_cpu_local_masks(void)
140{ 45{
141 alloc_bootmem_cpumask_var(&cpu_initialized_mask); 46#ifdef CONFIG_X86_32
142 alloc_bootmem_cpumask_var(&cpu_callin_mask); 47 struct desc_struct gdt;
143 alloc_bootmem_cpumask_var(&cpu_callout_mask);
144 alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
145}
146
147#else /* CONFIG_X86_32 */
148 48
149static inline void setup_cpu_local_masks(void) 49 pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
150{ 50 0x2 | DESCTYPE_S, 0x8);
51 gdt.s = 1;
52 write_gdt_entry(get_cpu_gdt_table(cpu),
53 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
54#endif
151} 55}
152 56
153#endif /* CONFIG_X86_32 */
154
155/* 57/*
156 * Great future plan: 58 * Great future plan:
157 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data. 59 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
@@ -159,18 +61,12 @@ static inline void setup_cpu_local_masks(void)
159 */ 61 */
160void __init setup_per_cpu_areas(void) 62void __init setup_per_cpu_areas(void)
161{ 63{
162 ssize_t size, old_size; 64 ssize_t size;
163 char *ptr; 65 char *ptr;
164 int cpu; 66 int cpu;
165 unsigned long align = 1;
166
167 /* Setup cpu_pda map */
168 setup_cpu_pda_map();
169 67
170 /* Copy section for each CPU (we discard the original) */ 68 /* Copy section for each CPU (we discard the original) */
171 old_size = PERCPU_ENOUGH_ROOM; 69 size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
172 align = max_t(unsigned long, PAGE_SIZE, align);
173 size = roundup(old_size, align);
174 70
175 pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", 71 pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
176 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); 72 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
@@ -179,30 +75,68 @@ void __init setup_per_cpu_areas(void)
179 75
180 for_each_possible_cpu(cpu) { 76 for_each_possible_cpu(cpu) {
181#ifndef CONFIG_NEED_MULTIPLE_NODES 77#ifndef CONFIG_NEED_MULTIPLE_NODES
182 ptr = __alloc_bootmem(size, align, 78 ptr = alloc_bootmem_pages(size);
183 __pa(MAX_DMA_ADDRESS));
184#else 79#else
185 int node = early_cpu_to_node(cpu); 80 int node = early_cpu_to_node(cpu);
186 if (!node_online(node) || !NODE_DATA(node)) { 81 if (!node_online(node) || !NODE_DATA(node)) {
187 ptr = __alloc_bootmem(size, align, 82 ptr = alloc_bootmem_pages(size);
188 __pa(MAX_DMA_ADDRESS));
189 pr_info("cpu %d has no node %d or node-local memory\n", 83 pr_info("cpu %d has no node %d or node-local memory\n",
190 cpu, node); 84 cpu, node);
191 pr_debug("per cpu data for cpu%d at %016lx\n", 85 pr_debug("per cpu data for cpu%d at %016lx\n",
192 cpu, __pa(ptr)); 86 cpu, __pa(ptr));
193 } else { 87 } else {
194 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, 88 ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
195 __pa(MAX_DMA_ADDRESS));
196 pr_debug("per cpu data for cpu%d on node%d at %016lx\n", 89 pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
197 cpu, node, __pa(ptr)); 90 cpu, node, __pa(ptr));
198 } 91 }
199#endif 92#endif
93
94 memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
200 per_cpu_offset(cpu) = ptr - __per_cpu_start; 95 per_cpu_offset(cpu) = ptr - __per_cpu_start;
201 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 96 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
97 per_cpu(cpu_number, cpu) = cpu;
98 setup_percpu_segment(cpu);
99 setup_stack_canary_segment(cpu);
100 /*
101 * Copy data used in early init routines from the
102 * initial arrays to the per cpu data areas. These
103 * arrays then become expendable and the *_early_ptr's
104 * are zeroed indicating that the static arrays are
105 * gone.
106 */
107#ifdef CONFIG_X86_LOCAL_APIC
108 per_cpu(x86_cpu_to_apicid, cpu) =
109 early_per_cpu_map(x86_cpu_to_apicid, cpu);
110 per_cpu(x86_bios_cpu_apicid, cpu) =
111 early_per_cpu_map(x86_bios_cpu_apicid, cpu);
112#endif
113#ifdef CONFIG_X86_64
114 per_cpu(irq_stack_ptr, cpu) =
115 per_cpu(irq_stack_union.irq_stack, cpu) +
116 IRQ_STACK_SIZE - 64;
117#ifdef CONFIG_NUMA
118 per_cpu(x86_cpu_to_node_map, cpu) =
119 early_per_cpu_map(x86_cpu_to_node_map, cpu);
120#endif
121#endif
122 /*
123 * Up to this point, the boot CPU has been using .data.init
124 * area. Reload any changed state for the boot CPU.
125 */
126 if (cpu == boot_cpu_id)
127 switch_to_new_gdt(cpu);
128
129 DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
202 } 130 }
203 131
204 /* Setup percpu data maps */ 132 /* indicate the early static arrays will soon be gone */
205 setup_per_cpu_maps(); 133#ifdef CONFIG_X86_LOCAL_APIC
134 early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
135 early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
136#endif
137#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
138 early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
139#endif
206 140
207 /* Setup node to cpumask map */ 141 /* Setup node to cpumask map */
208 setup_node_to_cpumask_map(); 142 setup_node_to_cpumask_map();
@@ -210,199 +144,3 @@ void __init setup_per_cpu_areas(void)
210 /* Setup cpu initialized, callin, callout masks */ 144 /* Setup cpu initialized, callin, callout masks */
211 setup_cpu_local_masks(); 145 setup_cpu_local_masks();
212} 146}
213
214#endif
215
216#ifdef X86_64_NUMA
217
218/*
219 * Allocate node_to_cpumask_map based on number of available nodes
220 * Requires node_possible_map to be valid.
221 *
222 * Note: node_to_cpumask() is not valid until after this is done.
223 */
224static void __init setup_node_to_cpumask_map(void)
225{
226 unsigned int node, num = 0;
227 cpumask_t *map;
228
229 /* setup nr_node_ids if not done yet */
230 if (nr_node_ids == MAX_NUMNODES) {
231 for_each_node_mask(node, node_possible_map)
232 num = node;
233 nr_node_ids = num + 1;
234 }
235
236 /* allocate the map */
237 map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
238
239 pr_debug("Node to cpumask map at %p for %d nodes\n",
240 map, nr_node_ids);
241
242 /* node_to_cpumask() will now work */
243 node_to_cpumask_map = map;
244}
245
246void __cpuinit numa_set_node(int cpu, int node)
247{
248 int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
249
250 if (cpu_pda(cpu) && node != NUMA_NO_NODE)
251 cpu_pda(cpu)->nodenumber = node;
252
253 if (cpu_to_node_map)
254 cpu_to_node_map[cpu] = node;
255
256 else if (per_cpu_offset(cpu))
257 per_cpu(x86_cpu_to_node_map, cpu) = node;
258
259 else
260 pr_debug("Setting node for non-present cpu %d\n", cpu);
261}
262
263void __cpuinit numa_clear_node(int cpu)
264{
265 numa_set_node(cpu, NUMA_NO_NODE);
266}
267
268#ifndef CONFIG_DEBUG_PER_CPU_MAPS
269
270void __cpuinit numa_add_cpu(int cpu)
271{
272 cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
273}
274
275void __cpuinit numa_remove_cpu(int cpu)
276{
277 cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
278}
279
280#else /* CONFIG_DEBUG_PER_CPU_MAPS */
281
282/*
283 * --------- debug versions of the numa functions ---------
284 */
285static void __cpuinit numa_set_cpumask(int cpu, int enable)
286{
287 int node = cpu_to_node(cpu);
288 cpumask_t *mask;
289 char buf[64];
290
291 if (node_to_cpumask_map == NULL) {
292 printk(KERN_ERR "node_to_cpumask_map NULL\n");
293 dump_stack();
294 return;
295 }
296
297 mask = &node_to_cpumask_map[node];
298 if (enable)
299 cpu_set(cpu, *mask);
300 else
301 cpu_clear(cpu, *mask);
302
303 cpulist_scnprintf(buf, sizeof(buf), mask);
304 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
305 enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
306}
307
308void __cpuinit numa_add_cpu(int cpu)
309{
310 numa_set_cpumask(cpu, 1);
311}
312
313void __cpuinit numa_remove_cpu(int cpu)
314{
315 numa_set_cpumask(cpu, 0);
316}
317
318int cpu_to_node(int cpu)
319{
320 if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
321 printk(KERN_WARNING
322 "cpu_to_node(%d): usage too early!\n", cpu);
323 dump_stack();
324 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
325 }
326 return per_cpu(x86_cpu_to_node_map, cpu);
327}
328EXPORT_SYMBOL(cpu_to_node);
329
330/*
331 * Same function as cpu_to_node() but used if called before the
332 * per_cpu areas are setup.
333 */
334int early_cpu_to_node(int cpu)
335{
336 if (early_per_cpu_ptr(x86_cpu_to_node_map))
337 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
338
339 if (!per_cpu_offset(cpu)) {
340 printk(KERN_WARNING
341 "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
342 dump_stack();
343 return NUMA_NO_NODE;
344 }
345 return per_cpu(x86_cpu_to_node_map, cpu);
346}
347
348
349/* empty cpumask */
350static const cpumask_t cpu_mask_none;
351
352/*
353 * Returns a pointer to the bitmask of CPUs on Node 'node'.
354 */
355const cpumask_t *cpumask_of_node(int node)
356{
357 if (node_to_cpumask_map == NULL) {
358 printk(KERN_WARNING
359 "cpumask_of_node(%d): no node_to_cpumask_map!\n",
360 node);
361 dump_stack();
362 return (const cpumask_t *)&cpu_online_map;
363 }
364 if (node >= nr_node_ids) {
365 printk(KERN_WARNING
366 "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
367 node, nr_node_ids);
368 dump_stack();
369 return &cpu_mask_none;
370 }
371 return &node_to_cpumask_map[node];
372}
373EXPORT_SYMBOL(cpumask_of_node);
374
375/*
376 * Returns a bitmask of CPUs on Node 'node'.
377 *
378 * Side note: this function creates the returned cpumask on the stack
379 * so with a high NR_CPUS count, excessive stack space is used. The
380 * node_to_cpumask_ptr function should be used whenever possible.
381 */
382cpumask_t node_to_cpumask(int node)
383{
384 if (node_to_cpumask_map == NULL) {
385 printk(KERN_WARNING
386 "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
387 dump_stack();
388 return cpu_online_map;
389 }
390 if (node >= nr_node_ids) {
391 printk(KERN_WARNING
392 "node_to_cpumask(%d): node >= nr_node_ids(%d)\n",
393 node, nr_node_ids);
394 dump_stack();
395 return cpu_mask_none;
396 }
397 return node_to_cpumask_map[node];
398}
399EXPORT_SYMBOL(node_to_cpumask);
400
401/*
402 * --------- end of debug versions of the numa functions ---------
403 */
404
405#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
406
407#endif /* X86_64_NUMA */
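
The early_per_cpu_ptr() tests above implement a two-phase mapping: before the per-cpu areas exist, cpu-to-node lookups go through a static boot-time array; once per-cpu offsets are valid, the real per-cpu variable takes over and the early pointer is cleared. A sketch of the pattern follows; the macro bodies are illustrative assumptions, not quoted from asm/percpu.h:

	/* Illustrative sketch of the early-per-cpu pattern assumed above. */
	#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
		DEFINE_PER_CPU(_type, _name) = _initvalue;		\
		static _type _name##_early_map[NR_CPUS] __initdata =	\
			{ [0 ... NR_CPUS-1] = _initvalue };		\
		_type *_name##_early_ptr = _name##_early_map

	#define early_per_cpu_ptr(_name)	(_name##_early_ptr)

	/* Once the per-cpu areas are live, values are copied into the
	 * per-cpu variable and the early pointer is set to NULL, which
	 * flips numa_set_node() and early_cpu_to_node() onto the
	 * per_cpu() path seen above. */
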
408
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index df0587f24c54..7cdcd16885ed 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -50,27 +50,23 @@
50# define FIX_EFLAGS __FIX_EFLAGS 50# define FIX_EFLAGS __FIX_EFLAGS
51#endif 51#endif
52 52
53#define COPY(x) { \ 53#define COPY(x) do { \
54 err |= __get_user(regs->x, &sc->x); \ 54 get_user_ex(regs->x, &sc->x); \
55} 55} while (0)
56 56
57#define COPY_SEG(seg) { \ 57#define GET_SEG(seg) ({ \
58 unsigned short tmp; \ 58 unsigned short tmp; \
59 err |= __get_user(tmp, &sc->seg); \ 59 get_user_ex(tmp, &sc->seg); \
60 regs->seg = tmp; \ 60 tmp; \
61} 61})
62 62
63#define COPY_SEG_CPL3(seg) { \ 63#define COPY_SEG(seg) do { \
64 unsigned short tmp; \ 64 regs->seg = GET_SEG(seg); \
65 err |= __get_user(tmp, &sc->seg); \ 65} while (0)
66 regs->seg = tmp | 3; \
67}
68 66
69#define GET_SEG(seg) { \ 67#define COPY_SEG_CPL3(seg) do { \
70 unsigned short tmp; \ 68 regs->seg = GET_SEG(seg) | 3; \
71 err |= __get_user(tmp, &sc->seg); \ 69} while (0)
72 loadsegment(seg, tmp); \
73}
74 70
75static int 71static int
76restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, 72restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
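
The macro rework above is about hygiene as much as about the new get_user_ex() accessor: a bare { ... } macro breaks when expanded as the single statement of an if, and GET_SEG becomes a GNU statement expression so it can yield a value. A minimal demonstration with hypothetical macros, not kernel code:

	#define BAD_INC(x)	{ (x)++; }
	#define GOOD_INC(x)	do { (x)++; } while (0)

	static int demo(int cond, int n)
	{
		if (cond)
			GOOD_INC(n);	/* do/while consumes the ';' cleanly */
		else
			n = 0;
		/* With BAD_INC the expansion "{ n++; };" leaves a stray ';'
		 * that ends the if statement and orphans the else: a
		 * compile error. */

		/* GET_SEG-style statement expression: the block's last
		 * expression is its value, which is what lets COPY_SEG_CPL3()
		 * compute "regs->seg = GET_SEG(seg) | 3" in one assignment. */
		return ({ int tmp = n; tmp; });
	}
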
@@ -83,45 +79,49 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
83 /* Always make any pending restarted system calls return -EINTR */ 79 /* Always make any pending restarted system calls return -EINTR */
84 current_thread_info()->restart_block.fn = do_no_restart_syscall; 80 current_thread_info()->restart_block.fn = do_no_restart_syscall;
85 81
82 get_user_try {
83
86#ifdef CONFIG_X86_32 84#ifdef CONFIG_X86_32
87 GET_SEG(gs); 85 set_user_gs(regs, GET_SEG(gs));
88 COPY_SEG(fs); 86 COPY_SEG(fs);
89 COPY_SEG(es); 87 COPY_SEG(es);
90 COPY_SEG(ds); 88 COPY_SEG(ds);
91#endif /* CONFIG_X86_32 */ 89#endif /* CONFIG_X86_32 */
92 90
93 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); 91 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
94 COPY(dx); COPY(cx); COPY(ip); 92 COPY(dx); COPY(cx); COPY(ip);
95 93
96#ifdef CONFIG_X86_64 94#ifdef CONFIG_X86_64
97 COPY(r8); 95 COPY(r8);
98 COPY(r9); 96 COPY(r9);
99 COPY(r10); 97 COPY(r10);
100 COPY(r11); 98 COPY(r11);
101 COPY(r12); 99 COPY(r12);
102 COPY(r13); 100 COPY(r13);
103 COPY(r14); 101 COPY(r14);
104 COPY(r15); 102 COPY(r15);
105#endif /* CONFIG_X86_64 */ 103#endif /* CONFIG_X86_64 */
106 104
107#ifdef CONFIG_X86_32 105#ifdef CONFIG_X86_32
108 COPY_SEG_CPL3(cs); 106 COPY_SEG_CPL3(cs);
109 COPY_SEG_CPL3(ss); 107 COPY_SEG_CPL3(ss);
110#else /* !CONFIG_X86_32 */ 108#else /* !CONFIG_X86_32 */
111 /* Kernel saves and restores only the CS segment register on signals, 109 /* Kernel saves and restores only the CS segment register on signals,
112 * which is the bare minimum needed to allow mixed 32/64-bit code. 110 * which is the bare minimum needed to allow mixed 32/64-bit code.
113 * App's signal handler can save/restore other segments if needed. */ 111 * App's signal handler can save/restore other segments if needed. */
114 COPY_SEG_CPL3(cs); 112 COPY_SEG_CPL3(cs);
115#endif /* CONFIG_X86_32 */ 113#endif /* CONFIG_X86_32 */
116 114
117 err |= __get_user(tmpflags, &sc->flags); 115 get_user_ex(tmpflags, &sc->flags);
118 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); 116 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
119 regs->orig_ax = -1; /* disable syscall checks */ 117 regs->orig_ax = -1; /* disable syscall checks */
118
119 get_user_ex(buf, &sc->fpstate);
120 err |= restore_i387_xstate(buf);
120 121
121 err |= __get_user(buf, &sc->fpstate); 122 get_user_ex(*pax, &sc->ax);
122 err |= restore_i387_xstate(buf); 123 } get_user_catch(err);
123 124
124 err |= __get_user(*pax, &sc->ax);
125 return err; 125 return err;
126} 126}
127 127
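
This conversion replaces one test-and-branch per __get_user() with a single error merge at the end of the block. A functionally equivalent model of the try/catch macros is sketched below as an assumption for illustration; the real asm/uaccess.h versions instead rely on exception-table fixups, so the success path executes no error bookkeeping at all:

	/* Model only -- not the in-kernel definitions. */
	#define get_user_try		do { int __gu_err = 0;
	#define get_user_ex(x, ptr)	(__gu_err |= __get_user(x, ptr))
	#define get_user_catch(err)	(err) |= __gu_err; } while (0)

	/* So that:
	 *	get_user_try {
	 *		get_user_ex(regs->di, &sc->di);
	 *	} get_user_catch(err);
	 * expands to a single do/while block with one error merge at the end. */
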
@@ -131,57 +131,55 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
131{ 131{
132 int err = 0; 132 int err = 0;
133 133
134#ifdef CONFIG_X86_32 134 put_user_try {
135 {
136 unsigned int tmp;
137 135
138 savesegment(gs, tmp); 136#ifdef CONFIG_X86_32
139 err |= __put_user(tmp, (unsigned int __user *)&sc->gs); 137 put_user_ex(get_user_gs(regs), (unsigned int __user *)&sc->gs);
140 } 138 put_user_ex(regs->fs, (unsigned int __user *)&sc->fs);
141 err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs); 139 put_user_ex(regs->es, (unsigned int __user *)&sc->es);
142 err |= __put_user(regs->es, (unsigned int __user *)&sc->es); 140 put_user_ex(regs->ds, (unsigned int __user *)&sc->ds);
143 err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds);
144#endif /* CONFIG_X86_32 */ 141#endif /* CONFIG_X86_32 */
145 142
146 err |= __put_user(regs->di, &sc->di); 143 put_user_ex(regs->di, &sc->di);
147 err |= __put_user(regs->si, &sc->si); 144 put_user_ex(regs->si, &sc->si);
148 err |= __put_user(regs->bp, &sc->bp); 145 put_user_ex(regs->bp, &sc->bp);
149 err |= __put_user(regs->sp, &sc->sp); 146 put_user_ex(regs->sp, &sc->sp);
150 err |= __put_user(regs->bx, &sc->bx); 147 put_user_ex(regs->bx, &sc->bx);
151 err |= __put_user(regs->dx, &sc->dx); 148 put_user_ex(regs->dx, &sc->dx);
152 err |= __put_user(regs->cx, &sc->cx); 149 put_user_ex(regs->cx, &sc->cx);
153 err |= __put_user(regs->ax, &sc->ax); 150 put_user_ex(regs->ax, &sc->ax);
154#ifdef CONFIG_X86_64 151#ifdef CONFIG_X86_64
155 err |= __put_user(regs->r8, &sc->r8); 152 put_user_ex(regs->r8, &sc->r8);
156 err |= __put_user(regs->r9, &sc->r9); 153 put_user_ex(regs->r9, &sc->r9);
157 err |= __put_user(regs->r10, &sc->r10); 154 put_user_ex(regs->r10, &sc->r10);
158 err |= __put_user(regs->r11, &sc->r11); 155 put_user_ex(regs->r11, &sc->r11);
159 err |= __put_user(regs->r12, &sc->r12); 156 put_user_ex(regs->r12, &sc->r12);
160 err |= __put_user(regs->r13, &sc->r13); 157 put_user_ex(regs->r13, &sc->r13);
161 err |= __put_user(regs->r14, &sc->r14); 158 put_user_ex(regs->r14, &sc->r14);
162 err |= __put_user(regs->r15, &sc->r15); 159 put_user_ex(regs->r15, &sc->r15);
163#endif /* CONFIG_X86_64 */ 160#endif /* CONFIG_X86_64 */
164 161
165 err |= __put_user(current->thread.trap_no, &sc->trapno); 162 put_user_ex(current->thread.trap_no, &sc->trapno);
166 err |= __put_user(current->thread.error_code, &sc->err); 163 put_user_ex(current->thread.error_code, &sc->err);
167 err |= __put_user(regs->ip, &sc->ip); 164 put_user_ex(regs->ip, &sc->ip);
168#ifdef CONFIG_X86_32 165#ifdef CONFIG_X86_32
169 err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs); 166 put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
170 err |= __put_user(regs->flags, &sc->flags); 167 put_user_ex(regs->flags, &sc->flags);
171 err |= __put_user(regs->sp, &sc->sp_at_signal); 168 put_user_ex(regs->sp, &sc->sp_at_signal);
172 err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); 169 put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
173#else /* !CONFIG_X86_32 */ 170#else /* !CONFIG_X86_32 */
174 err |= __put_user(regs->flags, &sc->flags); 171 put_user_ex(regs->flags, &sc->flags);
175 err |= __put_user(regs->cs, &sc->cs); 172 put_user_ex(regs->cs, &sc->cs);
176 err |= __put_user(0, &sc->gs); 173 put_user_ex(0, &sc->gs);
177 err |= __put_user(0, &sc->fs); 174 put_user_ex(0, &sc->fs);
178#endif /* CONFIG_X86_32 */ 175#endif /* CONFIG_X86_32 */
179 176
180 err |= __put_user(fpstate, &sc->fpstate); 177 put_user_ex(fpstate, &sc->fpstate);
181 178
182 /* non-iBCS2 extensions.. */ 179 /* non-iBCS2 extensions.. */
183 err |= __put_user(mask, &sc->oldmask); 180 put_user_ex(mask, &sc->oldmask);
184 err |= __put_user(current->thread.cr2, &sc->cr2); 181 put_user_ex(current->thread.cr2, &sc->cr2);
182 } put_user_catch(err);
185 183
186 return err; 184 return err;
187} 185}
@@ -336,43 +334,41 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
336 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 334 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
337 return -EFAULT; 335 return -EFAULT;
338 336
339 err |= __put_user(sig, &frame->sig); 337 put_user_try {
340 err |= __put_user(&frame->info, &frame->pinfo); 338 put_user_ex(sig, &frame->sig);
341 err |= __put_user(&frame->uc, &frame->puc); 339 put_user_ex(&frame->info, &frame->pinfo);
342 err |= copy_siginfo_to_user(&frame->info, info); 340 put_user_ex(&frame->uc, &frame->puc);
343 if (err) 341 err |= copy_siginfo_to_user(&frame->info, info);
344 return -EFAULT;
345
346 /* Create the ucontext. */
347 if (cpu_has_xsave)
348 err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
349 else
350 err |= __put_user(0, &frame->uc.uc_flags);
351 err |= __put_user(0, &frame->uc.uc_link);
352 err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
353 err |= __put_user(sas_ss_flags(regs->sp),
354 &frame->uc.uc_stack.ss_flags);
355 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
356 err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
357 regs, set->sig[0]);
358 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
359 if (err)
360 return -EFAULT;
361 342
362 /* Set up to return from userspace. */ 343 /* Create the ucontext. */
363 restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); 344 if (cpu_has_xsave)
364 if (ka->sa.sa_flags & SA_RESTORER) 345 put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
365 restorer = ka->sa.sa_restorer; 346 else
366 err |= __put_user(restorer, &frame->pretcode); 347 put_user_ex(0, &frame->uc.uc_flags);
348 put_user_ex(0, &frame->uc.uc_link);
349 put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
350 put_user_ex(sas_ss_flags(regs->sp),
351 &frame->uc.uc_stack.ss_flags);
352 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
353 err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
354 regs, set->sig[0]);
355 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
356
357 /* Set up to return from userspace. */
358 restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
359 if (ka->sa.sa_flags & SA_RESTORER)
360 restorer = ka->sa.sa_restorer;
361 put_user_ex(restorer, &frame->pretcode);
367 362
368 /* 363 /*
369 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80 364 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
370 * 365 *
371 * WE DO NOT USE IT ANY MORE! It's only left here for historical 366 * WE DO NOT USE IT ANY MORE! It's only left here for historical
372 * reasons and because gdb uses it as a signature to notice 367 * reasons and because gdb uses it as a signature to notice
373 * signal handler stack frames. 368 * signal handler stack frames.
374 */ 369 */
375 err |= __put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode); 370 put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
371 } put_user_catch(err);
376 372
377 if (err) 373 if (err)
378 return -EFAULT; 374 return -EFAULT;
@@ -436,28 +432,30 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
436 return -EFAULT; 432 return -EFAULT;
437 } 433 }
438 434
439 /* Create the ucontext. */ 435 put_user_try {
440 if (cpu_has_xsave) 436 /* Create the ucontext. */
441 err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); 437 if (cpu_has_xsave)
442 else 438 put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
443 err |= __put_user(0, &frame->uc.uc_flags); 439 else
444 err |= __put_user(0, &frame->uc.uc_link); 440 put_user_ex(0, &frame->uc.uc_flags);
445 err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); 441 put_user_ex(0, &frame->uc.uc_link);
446 err |= __put_user(sas_ss_flags(regs->sp), 442 put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
447 &frame->uc.uc_stack.ss_flags); 443 put_user_ex(sas_ss_flags(regs->sp),
448 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); 444 &frame->uc.uc_stack.ss_flags);
449 err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]); 445 put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
450 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 446 err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
451 447 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
452 /* Set up to return from userspace. If provided, use a stub 448
453 already in userspace. */ 449 /* Set up to return from userspace. If provided, use a stub
454 /* x86-64 should always use SA_RESTORER. */ 450 already in userspace. */
455 if (ka->sa.sa_flags & SA_RESTORER) { 451 /* x86-64 should always use SA_RESTORER. */
456 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); 452 if (ka->sa.sa_flags & SA_RESTORER) {
457 } else { 453 put_user_ex(ka->sa.sa_restorer, &frame->pretcode);
458 /* could use a vstub here */ 454 } else {
459 return -EFAULT; 455 /* could use a vstub here */
460 } 456 err |= -EFAULT;
457 }
458 } put_user_catch(err);
461 459
462 if (err) 460 if (err)
463 return -EFAULT; 461 return -EFAULT;
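
Note the one behavioral subtlety in this hunk: the missing-SA_RESTORER case can no longer return -EFAULT directly, because control must fall through to put_user_catch() so the block is closed (and, in the real implementation, so the saved per-thread uaccess state is restored). Recording the failure and bailing out afterwards is the idiom assumed here:

	put_user_try {
		if (!(ka->sa.sa_flags & SA_RESTORER))
			err |= -EFAULT;	/* record it; do NOT return here */
	} put_user_catch(err);

	if (err)
		return -EFAULT;		/* safe: the catch has already run */
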
@@ -509,31 +507,41 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
509 struct old_sigaction __user *oact) 507 struct old_sigaction __user *oact)
510{ 508{
511 struct k_sigaction new_ka, old_ka; 509 struct k_sigaction new_ka, old_ka;
512 int ret; 510 int ret = 0;
513 511
514 if (act) { 512 if (act) {
515 old_sigset_t mask; 513 old_sigset_t mask;
516 514
517 if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 515 if (!access_ok(VERIFY_READ, act, sizeof(*act)))
518 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
519 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
520 return -EFAULT; 516 return -EFAULT;
521 517
522 __get_user(new_ka.sa.sa_flags, &act->sa_flags); 518 get_user_try {
523 __get_user(mask, &act->sa_mask); 519 get_user_ex(new_ka.sa.sa_handler, &act->sa_handler);
520 get_user_ex(new_ka.sa.sa_flags, &act->sa_flags);
521 get_user_ex(mask, &act->sa_mask);
522 get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer);
523 } get_user_catch(ret);
524
525 if (ret)
526 return -EFAULT;
524 siginitset(&new_ka.sa.sa_mask, mask); 527 siginitset(&new_ka.sa.sa_mask, mask);
525 } 528 }
526 529
527 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 530 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
528 531
529 if (!ret && oact) { 532 if (!ret && oact) {
530 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 533 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
531 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
532 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
533 return -EFAULT; 534 return -EFAULT;
534 535
535 __put_user(old_ka.sa.sa_flags, &oact->sa_flags); 536 put_user_try {
536 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); 537 put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler);
538 put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags);
539 put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
540 put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer);
541 } put_user_catch(ret);
542
543 if (ret)
544 return -EFAULT;
537 } 545 }
538 546
539 return ret; 547 return ret;
@@ -541,14 +549,9 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
541#endif /* CONFIG_X86_32 */ 549#endif /* CONFIG_X86_32 */
542 550
543#ifdef CONFIG_X86_32 551#ifdef CONFIG_X86_32
544asmlinkage int sys_sigaltstack(unsigned long bx) 552int sys_sigaltstack(struct pt_regs *regs)
545{ 553{
546 /* 554 const stack_t __user *uss = (const stack_t __user *)regs->bx;
547 * This is needed to make gcc realize it doesn't own the
548 * "struct pt_regs"
549 */
550 struct pt_regs *regs = (struct pt_regs *)&bx;
551 const stack_t __user *uss = (const stack_t __user *)bx;
552 stack_t __user *uoss = (stack_t __user *)regs->cx; 555 stack_t __user *uoss = (stack_t __user *)regs->cx;
553 556
554 return do_sigaltstack(uss, uoss, regs->sp); 557 return do_sigaltstack(uss, uoss, regs->sp);
@@ -566,14 +569,12 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
566 * Do a signal return; undo the signal stack. 569 * Do a signal return; undo the signal stack.
567 */ 570 */
568#ifdef CONFIG_X86_32 571#ifdef CONFIG_X86_32
569asmlinkage unsigned long sys_sigreturn(unsigned long __unused) 572unsigned long sys_sigreturn(struct pt_regs *regs)
570{ 573{
571 struct sigframe __user *frame; 574 struct sigframe __user *frame;
572 struct pt_regs *regs;
573 unsigned long ax; 575 unsigned long ax;
574 sigset_t set; 576 sigset_t set;
575 577
576 regs = (struct pt_regs *) &__unused;
577 frame = (struct sigframe __user *)(regs->sp - 8); 578 frame = (struct sigframe __user *)(regs->sp - 8);
578 579
579 if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 580 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -600,7 +601,7 @@ badframe:
600} 601}
601#endif /* CONFIG_X86_32 */ 602#endif /* CONFIG_X86_32 */
602 603
603static long do_rt_sigreturn(struct pt_regs *regs) 604long sys_rt_sigreturn(struct pt_regs *regs)
604{ 605{
605 struct rt_sigframe __user *frame; 606 struct rt_sigframe __user *frame;
606 unsigned long ax; 607 unsigned long ax;
@@ -631,25 +632,6 @@ badframe:
631 return 0; 632 return 0;
632} 633}
633 634
634#ifdef CONFIG_X86_32
635/*
636 * Note: do not pass in pt_regs directly as with tail-call optimization
637 * GCC will incorrectly stomp on the caller's frame and corrupt user-space
638 * register state:
639 */
640asmlinkage int sys_rt_sigreturn(unsigned long __unused)
641{
642 struct pt_regs *regs = (struct pt_regs *)&__unused;
643
644 return do_rt_sigreturn(regs);
645}
646#else /* !CONFIG_X86_32 */
647asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
648{
649 return do_rt_sigreturn(regs);
650}
651#endif /* CONFIG_X86_32 */
652
653/* 635/*
654 * OK, we're invoking a handler: 636 * OK, we're invoking a handler:
655 */ 637 */
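
With the asmlinkage tricks above removed, sys_sigreturn(), sys_rt_sigreturn() and sys_sigaltstack() take an explicit struct pt_regs * on 32-bit, so something must materialize that pointer. The syscall-table diff further down renames the affected entries to ptregs_* stubs. One plausible shape for such a stub, assuming the kernel's regparm(3) convention puts the first argument in %eax; the real definition lives in entry_32.S, which is not part of this diff:

	/* Assumed stub shape, in a .S file -- illustrative, not quoted: */
	#define PTREGSCALL(name)				\
		ALIGN;						\
	ptregs_##name:						\
		leal 4(%esp), %eax;	/* skip return address: %eax = &pt_regs */ \
		jmp sys_##name

	PTREGSCALL(sigreturn)
	PTREGSCALL(rt_sigreturn)
	PTREGSCALL(sigaltstack)
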
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index e6faa3316bd2..eaaffae31cc0 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -2,7 +2,7 @@
2 * Intel SMP support routines. 2 * Intel SMP support routines.
3 * 3 *
4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk> 4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
5 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com> 5 * (c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
6 * (c) 2002,2003 Andi Kleen, SuSE Labs. 6 * (c) 2002,2003 Andi Kleen, SuSE Labs.
7 * 7 *
8 * i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com> 8 * i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
@@ -26,8 +26,7 @@
26#include <asm/tlbflush.h> 26#include <asm/tlbflush.h>
27#include <asm/mmu_context.h> 27#include <asm/mmu_context.h>
28#include <asm/proto.h> 28#include <asm/proto.h>
29#include <mach_ipi.h> 29#include <asm/genapic.h>
30#include <mach_apic.h>
31/* 30/*
32 * Some notes on x86 processor bugs affecting SMP operation: 31 * Some notes on x86 processor bugs affecting SMP operation:
33 * 32 *
@@ -118,12 +117,12 @@ static void native_smp_send_reschedule(int cpu)
118 WARN_ON(1); 117 WARN_ON(1);
119 return; 118 return;
120 } 119 }
121 send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); 120 apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
122} 121}
123 122
124void native_send_call_func_single_ipi(int cpu) 123void native_send_call_func_single_ipi(int cpu)
125{ 124{
126 send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR); 125 apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
127} 126}
128 127
129void native_send_call_func_ipi(const struct cpumask *mask) 128void native_send_call_func_ipi(const struct cpumask *mask)
@@ -131,7 +130,7 @@ void native_send_call_func_ipi(const struct cpumask *mask)
131 cpumask_var_t allbutself; 130 cpumask_var_t allbutself;
132 131
133 if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) { 132 if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
134 send_IPI_mask(mask, CALL_FUNCTION_VECTOR); 133 apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
135 return; 134 return;
136 } 135 }
137 136
@@ -140,9 +139,9 @@ void native_send_call_func_ipi(const struct cpumask *mask)
140 139
141 if (cpumask_equal(mask, allbutself) && 140 if (cpumask_equal(mask, allbutself) &&
142 cpumask_equal(cpu_online_mask, cpu_callout_mask)) 141 cpumask_equal(cpu_online_mask, cpu_callout_mask))
143 send_IPI_allbutself(CALL_FUNCTION_VECTOR); 142 apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
144 else 143 else
145 send_IPI_mask(mask, CALL_FUNCTION_VECTOR); 144 apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
146 145
147 free_cpumask_var(allbutself); 146 free_cpumask_var(allbutself);
148} 147}
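
Every IPI call site in this file now indirects through a single driver object instead of the compile-time <mach_ipi.h> binding. From the calls made here and the apic_summit initializer later in this diff, the indirection is assumed to look roughly like:

	/* Subset of struct genapic actually exercised by smp.c above: */
	struct genapic {
		const char	*name;
		void		(*send_IPI_mask)(const struct cpumask *mask,
						 int vector);
		void		(*send_IPI_allbutself)(int vector);
		/* ... plus many more ops for probing, apicid mapping,
		 * wakeup, etc. ... */
	};

	extern struct genapic *apic;	/* selected once at boot by probe code */
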
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index bb1a3b1fc87f..af57f88186e7 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -2,7 +2,7 @@
2 * x86 SMP booting functions 2 * x86 SMP booting functions
3 * 3 *
4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk> 4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
5 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> 5 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
6 * Copyright 2001 Andi Kleen, SuSE Labs. 6 * Copyright 2001 Andi Kleen, SuSE Labs.
7 * 7 *
8 * Much of the core SMP work is based on previous work by Thomas Radke, to 8 * Much of the core SMP work is based on previous work by Thomas Radke, to
@@ -53,7 +53,6 @@
53#include <asm/nmi.h> 53#include <asm/nmi.h>
54#include <asm/irq.h> 54#include <asm/irq.h>
55#include <asm/idle.h> 55#include <asm/idle.h>
56#include <asm/smp.h>
57#include <asm/trampoline.h> 56#include <asm/trampoline.h>
58#include <asm/cpu.h> 57#include <asm/cpu.h>
59#include <asm/numa.h> 58#include <asm/numa.h>
@@ -63,11 +62,11 @@
63#include <asm/vmi.h> 62#include <asm/vmi.h>
64#include <asm/genapic.h> 63#include <asm/genapic.h>
65#include <asm/setup.h> 64#include <asm/setup.h>
65#include <asm/uv/uv.h>
66#include <linux/mc146818rtc.h> 66#include <linux/mc146818rtc.h>
67 67
68#include <mach_apic.h> 68#include <asm/genapic.h>
69#include <mach_wakecpu.h> 69#include <asm/smpboot_hooks.h>
70#include <smpboot_hooks.h>
71 70
72#ifdef CONFIG_X86_32 71#ifdef CONFIG_X86_32
73u8 apicid_2_node[MAX_APICID]; 72u8 apicid_2_node[MAX_APICID];
@@ -163,7 +162,7 @@ static void map_cpu_to_logical_apicid(void)
163{ 162{
164 int cpu = smp_processor_id(); 163 int cpu = smp_processor_id();
165 int apicid = logical_smp_processor_id(); 164 int apicid = logical_smp_processor_id();
166 int node = apicid_to_node(apicid); 165 int node = apic->apicid_to_node(apicid);
167 166
168 if (!node_online(node)) 167 if (!node_online(node))
169 node = first_online_node; 168 node = first_online_node;
@@ -196,7 +195,8 @@ static void __cpuinit smp_callin(void)
196 * our local APIC. We have to wait for the IPI or we'll 195 * our local APIC. We have to wait for the IPI or we'll
197 * lock up on an APIC access. 196 * lock up on an APIC access.
198 */ 197 */
199 wait_for_init_deassert(&init_deasserted); 198 if (apic->wait_for_init_deassert)
199 apic->wait_for_init_deassert(&init_deasserted);
200 200
201 /* 201 /*
202 * (This works even if the APIC is not enabled.) 202 * (This works even if the APIC is not enabled.)
@@ -243,7 +243,8 @@ static void __cpuinit smp_callin(void)
243 */ 243 */
244 244
245 pr_debug("CALLIN, before setup_local_APIC().\n"); 245 pr_debug("CALLIN, before setup_local_APIC().\n");
246 smp_callin_clear_local_apic(); 246 if (apic->smp_callin_clear_local_apic)
247 apic->smp_callin_clear_local_apic();
247 setup_local_APIC(); 248 setup_local_APIC();
248 end_local_APIC_setup(); 249 end_local_APIC_setup();
249 map_cpu_to_logical_apicid(); 250 map_cpu_to_logical_apicid();
@@ -583,7 +584,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
583 /* Target chip */ 584 /* Target chip */
584 /* Boot on the stack */ 585 /* Boot on the stack */
585 /* Kick the second */ 586 /* Kick the second */
586 apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid); 587 apic_icr_write(APIC_DM_NMI | apic->dest_logical, logical_apicid);
587 588
588 pr_debug("Waiting for send to finish...\n"); 589 pr_debug("Waiting for send to finish...\n");
589 send_status = safe_apic_wait_icr_idle(); 590 send_status = safe_apic_wait_icr_idle();
@@ -745,57 +746,11 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
745 complete(&c_idle->done); 746 complete(&c_idle->done);
746} 747}
747 748
748#ifdef CONFIG_X86_64
749
750/* __ref because it's safe to call free_bootmem when after_bootmem == 0. */
751static void __ref free_bootmem_pda(struct x8664_pda *oldpda)
752{
753 if (!after_bootmem)
754 free_bootmem((unsigned long)oldpda, sizeof(*oldpda));
755}
756
757/*
758 * Allocate node local memory for the AP pda.
759 *
760 * Must be called after the _cpu_pda pointer table is initialized.
761 */
762int __cpuinit get_local_pda(int cpu)
763{
764 struct x8664_pda *oldpda, *newpda;
765 unsigned long size = sizeof(struct x8664_pda);
766 int node = cpu_to_node(cpu);
767
768 if (cpu_pda(cpu) && !cpu_pda(cpu)->in_bootmem)
769 return 0;
770
771 oldpda = cpu_pda(cpu);
772 newpda = kmalloc_node(size, GFP_ATOMIC, node);
773 if (!newpda) {
774 printk(KERN_ERR "Could not allocate node local PDA "
775 "for CPU %d on node %d\n", cpu, node);
776
777 if (oldpda)
778 return 0; /* have a usable pda */
779 else
780 return -1;
781 }
782
783 if (oldpda) {
784 memcpy(newpda, oldpda, size);
785 free_bootmem_pda(oldpda);
786 }
787
788 newpda->in_bootmem = 0;
789 cpu_pda(cpu) = newpda;
790 return 0;
791}
792#endif /* CONFIG_X86_64 */
793
794static int __cpuinit do_boot_cpu(int apicid, int cpu) 749static int __cpuinit do_boot_cpu(int apicid, int cpu)
795/* 750/*
796 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad 751 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
797 * (ie clustered apic addressing mode), this is a LOGICAL apic ID. 752 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
798 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu. 753 * Returns zero if CPU booted OK, else error code from ->wakeup_cpu.
799 */ 754 */
800{ 755{
801 unsigned long boot_error = 0; 756 unsigned long boot_error = 0;
@@ -808,16 +763,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
808 }; 763 };
809 INIT_WORK(&c_idle.work, do_fork_idle); 764 INIT_WORK(&c_idle.work, do_fork_idle);
810 765
811#ifdef CONFIG_X86_64
812 /* Allocate node local memory for AP pdas */
813 if (cpu > 0) {
814 boot_error = get_local_pda(cpu);
815 if (boot_error)
816 goto restore_state;
817 /* if can't get pda memory, can't start cpu */
818 }
819#endif
820
821 alternatives_smp_switch(1); 766 alternatives_smp_switch(1);
822 767
823 c_idle.idle = get_idle_for_cpu(cpu); 768 c_idle.idle = get_idle_for_cpu(cpu);
@@ -847,14 +792,16 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
847 792
848 set_idle_for_cpu(cpu, c_idle.idle); 793 set_idle_for_cpu(cpu, c_idle.idle);
849do_rest: 794do_rest:
850#ifdef CONFIG_X86_32
851 per_cpu(current_task, cpu) = c_idle.idle; 795 per_cpu(current_task, cpu) = c_idle.idle;
852 init_gdt(cpu); 796#ifdef CONFIG_X86_32
853 /* Stack for startup_32 can be just as for start_secondary onwards */ 797 /* Stack for startup_32 can be just as for start_secondary onwards */
854 irq_ctx_init(cpu); 798 irq_ctx_init(cpu);
855#else 799#else
856 cpu_pda(cpu)->pcurrent = c_idle.idle;
857 clear_tsk_thread_flag(c_idle.idle, TIF_FORK); 800 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
801 initial_gs = per_cpu_offset(cpu);
802 per_cpu(kernel_stack, cpu) =
803 (unsigned long)task_stack_page(c_idle.idle) -
804 KERNEL_STACK_OFFSET + THREAD_SIZE;
858#endif 805#endif
859 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); 806 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
860 initial_code = (unsigned long)start_secondary; 807 initial_code = (unsigned long)start_secondary;
@@ -878,7 +825,8 @@ do_rest:
878 825
879 pr_debug("Setting warm reset code and vector.\n"); 826 pr_debug("Setting warm reset code and vector.\n");
880 827
881 store_NMI_vector(&nmi_high, &nmi_low); 828 if (apic->store_NMI_vector)
829 apic->store_NMI_vector(&nmi_high, &nmi_low);
882 830
883 smpboot_setup_warm_reset_vector(start_ip); 831 smpboot_setup_warm_reset_vector(start_ip);
884 /* 832 /*
@@ -893,7 +841,7 @@ do_rest:
893 /* 841 /*
894 * Starting actual IPI sequence... 842 * Starting actual IPI sequence...
895 */ 843 */
896 boot_error = wakeup_secondary_cpu(apicid, start_ip); 844 boot_error = apic->wakeup_cpu(apicid, start_ip);
897 845
898 if (!boot_error) { 846 if (!boot_error) {
899 /* 847 /*
@@ -927,13 +875,11 @@ do_rest:
927 else 875 else
928 /* trampoline code not run */ 876 /* trampoline code not run */
929 printk(KERN_ERR "Not responding.\n"); 877 printk(KERN_ERR "Not responding.\n");
930 if (get_uv_system_type() != UV_NON_UNIQUE_APIC) 878 if (apic->inquire_remote_apic)
931 inquire_remote_apic(apicid); 879 apic->inquire_remote_apic(apicid);
932 } 880 }
933 } 881 }
934#ifdef CONFIG_X86_64 882
935restore_state:
936#endif
937 if (boot_error) { 883 if (boot_error) {
938 /* Try to put things back the way they were before ... */ 884 /* Try to put things back the way they were before ... */
939 numa_remove_cpu(cpu); /* was set by numa_add_cpu */ 885 numa_remove_cpu(cpu); /* was set by numa_add_cpu */
@@ -961,7 +907,7 @@ restore_state:
961 907
962int __cpuinit native_cpu_up(unsigned int cpu) 908int __cpuinit native_cpu_up(unsigned int cpu)
963{ 909{
964 int apicid = cpu_present_to_apicid(cpu); 910 int apicid = apic->cpu_present_to_apicid(cpu);
965 unsigned long flags; 911 unsigned long flags;
966 int err; 912 int err;
967 913
@@ -1054,14 +1000,14 @@ static int __init smp_sanity_check(unsigned max_cpus)
1054{ 1000{
1055 preempt_disable(); 1001 preempt_disable();
1056 1002
1057#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32) 1003#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
1058 if (def_to_bigsmp && nr_cpu_ids > 8) { 1004 if (def_to_bigsmp && nr_cpu_ids > 8) {
1059 unsigned int cpu; 1005 unsigned int cpu;
1060 unsigned nr; 1006 unsigned nr;
1061 1007
1062 printk(KERN_WARNING 1008 printk(KERN_WARNING
1063 "More than 8 CPUs detected - skipping them.\n" 1009 "More than 8 CPUs detected - skipping them.\n"
1064 "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n"); 1010 "Use CONFIG_X86_BIGSMP.\n");
1065 1011
1066 nr = 0; 1012 nr = 0;
1067 for_each_present_cpu(cpu) { 1013 for_each_present_cpu(cpu) {
@@ -1107,7 +1053,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
1107 * Should not be necessary because the MP table should list the boot 1053 * Should not be necessary because the MP table should list the boot
1108 * CPU too, but we do it for the sake of robustness anyway. 1054 * CPU too, but we do it for the sake of robustness anyway.
1109 */ 1055 */
1110 if (!check_phys_apicid_present(boot_cpu_physical_apicid)) { 1056 if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
1111 printk(KERN_NOTICE 1057 printk(KERN_NOTICE
1112 "weird, boot CPU (#%d) not listed by the BIOS.\n", 1058 "weird, boot CPU (#%d) not listed by the BIOS.\n",
1113 boot_cpu_physical_apicid); 1059 boot_cpu_physical_apicid);
@@ -1125,6 +1071,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
1125 printk(KERN_ERR "... forcing use of dummy APIC emulation." 1071 printk(KERN_ERR "... forcing use of dummy APIC emulation."
1126 "(tell your hw vendor)\n"); 1072 "(tell your hw vendor)\n");
1127 smpboot_clear_io_apic(); 1073 smpboot_clear_io_apic();
1074 arch_disable_smp_support();
1128 return -1; 1075 return -1;
1129 } 1076 }
1130 1077
@@ -1183,7 +1130,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1183 1130
1184#ifdef CONFIG_X86_64 1131#ifdef CONFIG_X86_64
1185 enable_IR_x2apic(); 1132 enable_IR_x2apic();
1186 setup_apic_routing(); 1133 default_setup_apic_routing();
1187#endif 1134#endif
1188 1135
1189 if (smp_sanity_check(max_cpus) < 0) { 1136 if (smp_sanity_check(max_cpus) < 0) {
@@ -1218,7 +1165,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1218 1165
1219 map_cpu_to_logical_apicid(); 1166 map_cpu_to_logical_apicid();
1220 1167
1221 setup_portio_remap(); 1168 if (apic->setup_portio_remap)
1169 apic->setup_portio_remap();
1222 1170
1223 smpboot_setup_io_apic(); 1171 smpboot_setup_io_apic();
1224 /* 1172 /*
@@ -1240,10 +1188,7 @@ out:
1240void __init native_smp_prepare_boot_cpu(void) 1188void __init native_smp_prepare_boot_cpu(void)
1241{ 1189{
1242 int me = smp_processor_id(); 1190 int me = smp_processor_id();
1243#ifdef CONFIG_X86_32 1191 switch_to_new_gdt(me);
1244 init_gdt(me);
1245#endif
1246 switch_to_new_gdt();
1247 /* already set me in cpu_online_mask in boot_cpu_init() */ 1192 /* already set me in cpu_online_mask in boot_cpu_init() */
1248 cpumask_set_cpu(me, cpu_callout_mask); 1193 cpumask_set_cpu(me, cpu_callout_mask);
1249 per_cpu(cpu_state, me) = CPU_ONLINE; 1194 per_cpu(cpu_state, me) = CPU_ONLINE;
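
On the 64-bit side of do_boot_cpu(), the PDA fields are replaced by two per-cpu values: initial_gs seeds the AP's GS base, and kernel_stack precomputes the stack pointer the syscall entry path will load. Unfolding the kernel_stack arithmetic, with KERNEL_STACK_OFFSET assumed to be the usual small reservation at the very top of the stack:

	/* Equivalent formulation -- illustrative only: */
	unsigned long stack_top = (unsigned long)task_stack_page(c_idle.idle)
				  + THREAD_SIZE;  /* one past the stack's last byte */

	per_cpu(kernel_stack, cpu) = stack_top - KERNEL_STACK_OFFSET;
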
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
deleted file mode 100644
index 397e309839dd..000000000000
--- a/arch/x86/kernel/smpcommon.c
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * SMP stuff which is common to all sub-architectures.
3 */
4#include <linux/module.h>
5#include <asm/smp.h>
6
7#ifdef CONFIG_X86_32
8DEFINE_PER_CPU(unsigned long, this_cpu_off);
9EXPORT_PER_CPU_SYMBOL(this_cpu_off);
10
11/*
12 * Initialize the CPU's GDT. This is either the boot CPU doing itself
13 * (still using the master per-cpu area), or a CPU doing it for a
14 * secondary which will soon come up.
15 */
16__cpuinit void init_gdt(int cpu)
17{
18 struct desc_struct gdt;
19
20 pack_descriptor(&gdt, __per_cpu_offset[cpu], 0xFFFFF,
21 0x2 | DESCTYPE_S, 0x8);
22 gdt.s = 1;
23
24 write_gdt_entry(get_cpu_gdt_table(cpu),
25 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
26
27 per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
28 per_cpu(cpu_number, cpu) = cpu;
29}
30#endif
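
smpcommon.c can go because init_gdt()'s job, pointing the per-cpu segment at __per_cpu_offset[cpu], is folded into switch_to_new_gdt(), which now takes the cpu number (see the native_smp_prepare_boot_cpu() hunk above). A sketch of the consolidated helper, assumed from the deleted code and the new call site rather than quoted from cpu/common.c:

	void switch_to_new_gdt(int cpu)
	{
		struct desc_ptr gdt_descr;

		gdt_descr.address = (long)get_cpu_gdt_table(cpu);
		gdt_descr.size = GDT_SIZE - 1;
		load_gdt(&gdt_descr);

		/* Reload the per-cpu segment base (%fs on 32-bit,
		 * %gs on 64-bit). */
		load_percpu_segment(cpu);
	}
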
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 10786af95545..f7bddc2e37d1 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Stack trace management functions 2 * Stack trace management functions
3 * 3 *
4 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 4 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 */ 5 */
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/stacktrace.h> 7#include <linux/stacktrace.h>
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c
index 7b987852e876..1e733eff9b33 100644
--- a/arch/x86/kernel/summit_32.c
+++ b/arch/x86/kernel/summit_32.c
@@ -30,8 +30,364 @@
30#include <linux/init.h> 30#include <linux/init.h>
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/bios_ebda.h> 32#include <asm/bios_ebda.h>
33#include <asm/summit/mpparse.h>
34 33
34/*
35 * APIC driver for the IBM "Summit" chipset.
36 */
37#define APIC_DEFINITION 1
38#include <linux/threads.h>
39#include <linux/cpumask.h>
40#include <asm/mpspec.h>
41#include <asm/apic.h>
42#include <asm/smp.h>
43#include <asm/genapic.h>
44#include <asm/fixmap.h>
45#include <asm/apicdef.h>
46#include <asm/ipi.h>
47#include <linux/kernel.h>
48#include <linux/string.h>
49#include <linux/init.h>
50#include <linux/gfp.h>
51#include <linux/smp.h>
52
53static inline unsigned summit_get_apic_id(unsigned long x)
54{
55 return (x >> 24) & 0xFF;
56}
57
58static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
59{
60 default_send_IPI_mask_sequence_logical(mask, vector);
61}
62
63static inline void summit_send_IPI_allbutself(int vector)
64{
65 cpumask_t mask = cpu_online_map;
66 cpu_clear(smp_processor_id(), mask);
67
68 if (!cpus_empty(mask))
69 summit_send_IPI_mask(&mask, vector);
70}
71
72static inline void summit_send_IPI_all(int vector)
73{
74 summit_send_IPI_mask(&cpu_online_map, vector);
75}
76
77#include <asm/tsc.h>
78
79extern int use_cyclone;
80
81#ifdef CONFIG_X86_SUMMIT_NUMA
82extern void setup_summit(void);
83#else
84#define setup_summit() {}
85#endif
86
87static inline int
88summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
89{
90 if (!strncmp(oem, "IBM ENSW", 8) &&
91 (!strncmp(productid, "VIGIL SMP", 9)
92 || !strncmp(productid, "EXA", 3)
93 || !strncmp(productid, "RUTHLESS SMP", 12))) {
94 mark_tsc_unstable("Summit based system");
95 use_cyclone = 1; /* enable cyclone timer */
96 setup_summit();
97 return 1;
98 }
99 return 0;
100}
101
102/* Hook from generic ACPI tables.c */
103static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
104{
105 if (!strncmp(oem_id, "IBM", 3) &&
106 (!strncmp(oem_table_id, "SERVIGIL", 8)
107 || !strncmp(oem_table_id, "EXA", 3))) {
108 mark_tsc_unstable("Summit based system");
109 use_cyclone = 1; /* enable cyclone timer */
110 setup_summit();
111 return 1;
112 }
113 return 0;
114}
115
116struct rio_table_hdr {
117 unsigned char version; /* Version number of this data structure */
118 /* Version 3 adds chassis_num & WP_index */
119 unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */
120 unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */
121} __attribute__((packed));
122
123struct scal_detail {
124 unsigned char node_id; /* Scalability Node ID */
125 unsigned long CBAR; /* Address of 1MB register space */
126 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
127 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
128 unsigned char port1node; /* Node ID port connected to: 0xFF = None */
129 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
130 unsigned char port2node; /* Node ID port connected to: 0xFF = None */
131 unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */
132 unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */
133} __attribute__((packed));
134
135struct rio_detail {
136 unsigned char node_id; /* RIO Node ID */
137 unsigned long BBAR; /* Address of 1MB register space */
138 unsigned char type; /* Type of device */
139 unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/
140 /* For CYC: Node ID of Twister that owns this CYC */
141 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
142 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
143 unsigned char port1node; /* Node ID port connected to: 0xFF=None */
144 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
145 unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */
146 /* For CYC: 0 */
147 unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */
148 /* = 0 : the XAPIC is not used, ie:*/
149 /* ints fwded to another XAPIC */
150 /* Bits1:7 Reserved */
151 /* For CYC: Bits0:7 Reserved */
152 unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */
153 /* lower slot numbers/PCI bus numbers */
154 /* For CYC: No meaning */
155 unsigned char chassis_num; /* 1 based Chassis number */
156 /* For LookOut WPEGs this field indicates the */
157 /* Expansion Chassis #, enumerated from Boot */
158 /* Node WPEG external port, then Boot Node CYC */
159 /* external port, then Next Vigil chassis WPEG */
160 /* external port, etc. */
161 /* Shared Lookouts have only 1 chassis number (the */
162 /* first one assigned) */
163} __attribute__((packed));
164
165
166typedef enum {
167 CompatTwister = 0, /* Compatibility Twister */
168 AltTwister = 1, /* Alternate Twister of internal 8-way */
169 CompatCyclone = 2, /* Compatibility Cyclone */
170 AltCyclone = 3, /* Alternate Cyclone of internal 8-way */
171 CompatWPEG = 4, /* Compatibility WPEG */
172 AltWPEG = 5, /* Second Planar WPEG */
173 LookOutAWPEG = 6, /* LookOut WPEG */
174 LookOutBWPEG = 7, /* LookOut WPEG */
175} node_type;
176
177static inline int is_WPEG(struct rio_detail *rio) {
178 return (rio->type == CompatWPEG || rio->type == AltWPEG ||
179 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
180}
181
182
183/* In clustered mode, the high nibble of APIC ID is a cluster number.
184 * The low nibble is a 4-bit bitmap. */
185#define XAPIC_DEST_CPUS_SHIFT 4
186#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
187#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
188
189#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
190
191static inline const cpumask_t *summit_target_cpus(void)
192{
193 /* CPU_MASK_ALL (0xff) has undefined behaviour with
194 * dest_LowestPrio mode logical clustered apic interrupt routing.
195 * Just start on cpu 0. IRQ balancing will spread load
196 */
197 return &cpumask_of_cpu(0);
198}
199
200static inline unsigned long
201summit_check_apicid_used(physid_mask_t bitmap, int apicid)
202{
203 return 0;
204}
205
206/* we don't use the phys_cpu_present_map to indicate apicid presence */
207static inline unsigned long summit_check_apicid_present(int bit)
208{
209 return 1;
210}
211
212#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
213
214extern u8 cpu_2_logical_apicid[];
215
216static inline void summit_init_apic_ldr(void)
217{
218 unsigned long val, id;
219 int count = 0;
220 u8 my_id = (u8)hard_smp_processor_id();
221 u8 my_cluster = (u8)apicid_cluster(my_id);
222#ifdef CONFIG_SMP
223 u8 lid;
224 int i;
225
226 /* Create logical APIC IDs by counting CPUs already in cluster. */
227 for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
228 lid = cpu_2_logical_apicid[i];
229 if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
230 ++count;
231 }
232#endif
233 /* We only have a 4-wide bitmap in cluster mode. If a deranged
234 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
235 BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
236 id = my_cluster | (1UL << count);
237 apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
238 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
239 val |= SET_APIC_LOGICAL_ID(id);
240 apic_write(APIC_LDR, val);
241}
242
243static inline int summit_apic_id_registered(void)
244{
245 return 1;
246}
247
248static inline void summit_setup_apic_routing(void)
249{
250 printk(KERN_INFO "Enabling APIC mode: Summit. Using %d I/O APICs\n",
251 nr_ioapics);
252}
253
254static inline int summit_apicid_to_node(int logical_apicid)
255{
256#ifdef CONFIG_SMP
257 return apicid_2_node[hard_smp_processor_id()];
258#else
259 return 0;
260#endif
261}
262
263/* Mapping from cpu number to logical apicid */
264static inline int summit_cpu_to_logical_apicid(int cpu)
265{
266#ifdef CONFIG_SMP
267 if (cpu >= nr_cpu_ids)
268 return BAD_APICID;
269 return (int)cpu_2_logical_apicid[cpu];
270#else
271 return logical_smp_processor_id();
272#endif
273}
274
275static inline int summit_cpu_present_to_apicid(int mps_cpu)
276{
277 if (mps_cpu < nr_cpu_ids)
278 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
279 else
280 return BAD_APICID;
281}
282
283static inline physid_mask_t
284summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
285{
286 /* For clustered we don't have a good way to do this yet - hack */
287 return physids_promote(0x0F);
288}
289
290static inline physid_mask_t summit_apicid_to_cpu_present(int apicid)
291{
292 return physid_mask_of_physid(0);
293}
294
295static inline void summit_setup_portio_remap(void)
296{
297}
298
299static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
300{
301 return 1;
302}
303
304static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
305{
306 int cpus_found = 0;
307 int num_bits_set;
308 int apicid;
309 int cpu;
310
311 num_bits_set = cpus_weight(*cpumask);
312 /* Return id to all */
313 if (num_bits_set >= nr_cpu_ids)
314 return 0xFF;
315 /*
316 * The CPUs in the mask must all be on the same APIC cluster. If they
317 * are not on the same cluster, return the default value of target_cpus():
318 */
319 cpu = first_cpu(*cpumask);
320 apicid = summit_cpu_to_logical_apicid(cpu);
321
322 while (cpus_found < num_bits_set) {
323 if (cpu_isset(cpu, *cpumask)) {
324 int new_apicid = summit_cpu_to_logical_apicid(cpu);
325
326 if (apicid_cluster(apicid) !=
327 apicid_cluster(new_apicid)) {
328 printk(KERN_ERR "%s: Not a valid mask!\n", __func__);
329
330 return 0xFF;
331 }
332 apicid = apicid | new_apicid;
333 cpus_found++;
334 }
335 cpu++;
336 }
337 return apicid;
338}
339
340static inline unsigned int
341summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
342 const struct cpumask *andmask)
343{
344 int apicid = summit_cpu_to_logical_apicid(0);
345 cpumask_var_t cpumask;
346
347 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
348 return apicid;
349
350 cpumask_and(cpumask, inmask, andmask);
351 cpumask_and(cpumask, cpumask, cpu_online_mask);
352 apicid = summit_cpu_mask_to_apicid(cpumask);
353
354 free_cpumask_var(cpumask);
355
356 return apicid;
357}
358
359/*
360 * cpuid returns the value latched in the HW at reset, not the APIC ID
361 * register's value. For any box whose BIOS changes APIC IDs, like
362 * clustered APIC systems, we must use hard_smp_processor_id.
363 *
364 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
365 */
366static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb)
367{
368 return hard_smp_processor_id() >> index_msb;
369}
370
371static int probe_summit(void)
372{
373 /* probed later in mptable/ACPI hooks */
374 return 0;
375}
376
377static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
378{
379 /* Careful. Some cpus do not strictly honor the set of cpus
380 * specified in the interrupt destination when using lowest
381 * priority interrupt delivery mode.
382 *
383 * In particular there was a hyperthreading cpu observed to
384 * deliver interrupts to the wrong hyperthread when only one
385 * hyperthread was specified in the interrupt desitination.
386 * hyperthread was specified in the interrupt destination.
387 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
388}
389
390#ifdef CONFIG_X86_SUMMIT_NUMA
35static struct rio_table_hdr *rio_table_hdr __initdata; 391static struct rio_table_hdr *rio_table_hdr __initdata;
36static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; 392static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata;
37static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata; 393static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata;
@@ -186,3 +542,61 @@ void __init setup_summit(void)
186 next_wpeg = 0; 542 next_wpeg = 0;
187 } while (next_wpeg != 0); 543 } while (next_wpeg != 0);
188} 544}
545#endif
546
547struct genapic apic_summit = {
548
549 .name = "summit",
550 .probe = probe_summit,
551 .acpi_madt_oem_check = summit_acpi_madt_oem_check,
552 .apic_id_registered = summit_apic_id_registered,
553
554 .irq_delivery_mode = dest_LowestPrio,
555 /* logical delivery broadcast to all CPUs: */
556 .irq_dest_mode = 1,
557
558 .target_cpus = summit_target_cpus,
559 .disable_esr = 1,
560 .dest_logical = APIC_DEST_LOGICAL,
561 .check_apicid_used = summit_check_apicid_used,
562 .check_apicid_present = summit_check_apicid_present,
563
564 .vector_allocation_domain = summit_vector_allocation_domain,
565 .init_apic_ldr = summit_init_apic_ldr,
566
567 .ioapic_phys_id_map = summit_ioapic_phys_id_map,
568 .setup_apic_routing = summit_setup_apic_routing,
569 .multi_timer_check = NULL,
570 .apicid_to_node = summit_apicid_to_node,
571 .cpu_to_logical_apicid = summit_cpu_to_logical_apicid,
572 .cpu_present_to_apicid = summit_cpu_present_to_apicid,
573 .apicid_to_cpu_present = summit_apicid_to_cpu_present,
574 .setup_portio_remap = NULL,
575 .check_phys_apicid_present = summit_check_phys_apicid_present,
576 .enable_apic_mode = NULL,
577 .phys_pkg_id = summit_phys_pkg_id,
578 .mps_oem_check = summit_mps_oem_check,
579
580 .get_apic_id = summit_get_apic_id,
581 .set_apic_id = NULL,
582 .apic_id_mask = 0xFF << 24,
583
584 .cpu_mask_to_apicid = summit_cpu_mask_to_apicid,
585 .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and,
586
587 .send_IPI_mask = summit_send_IPI_mask,
588 .send_IPI_mask_allbutself = NULL,
589 .send_IPI_allbutself = summit_send_IPI_allbutself,
590 .send_IPI_all = summit_send_IPI_all,
591 .send_IPI_self = default_send_IPI_self,
592
593 .wakeup_cpu = NULL,
594 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
595 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
596
597 .wait_for_init_deassert = default_wait_for_init_deassert,
598
599 .smp_callin_clear_local_apic = NULL,
600 .store_NMI_vector = NULL,
601 .inquire_remote_apic = default_inquire_remote_apic,
602};
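
Since probe_summit() always returns 0, apic_summit is only ever selected through its mps_oem_check/acpi_madt_oem_check hooks. For context, the generic probe code is assumed to walk a NULL-terminated driver table along these lines (contents and ordering illustrative):

	/* Hypothetical excerpt of the generic probe table
	 * (mach-generic/probe.c): */
	static struct genapic *apic_probe[] __initdata = {
		&apic_summit,
		&apic_bigsmp,
		&apic_es7000,
		&apic_default,		/* must be last */
		NULL,
	};
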
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index e2e86a08f31d..3bdb64829b82 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -1,7 +1,7 @@
1ENTRY(sys_call_table) 1ENTRY(sys_call_table)
2 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ 2 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
3 .long sys_exit 3 .long sys_exit
4 .long sys_fork 4 .long ptregs_fork
5 .long sys_read 5 .long sys_read
6 .long sys_write 6 .long sys_write
7 .long sys_open /* 5 */ 7 .long sys_open /* 5 */
@@ -10,7 +10,7 @@ ENTRY(sys_call_table)
10 .long sys_creat 10 .long sys_creat
11 .long sys_link 11 .long sys_link
12 .long sys_unlink /* 10 */ 12 .long sys_unlink /* 10 */
13 .long sys_execve 13 .long ptregs_execve
14 .long sys_chdir 14 .long sys_chdir
15 .long sys_time 15 .long sys_time
16 .long sys_mknod 16 .long sys_mknod
@@ -109,17 +109,17 @@ ENTRY(sys_call_table)
109 .long sys_newlstat 109 .long sys_newlstat
110 .long sys_newfstat 110 .long sys_newfstat
111 .long sys_uname 111 .long sys_uname
112 .long sys_iopl /* 110 */ 112 .long ptregs_iopl /* 110 */
113 .long sys_vhangup 113 .long sys_vhangup
114 .long sys_ni_syscall /* old "idle" system call */ 114 .long sys_ni_syscall /* old "idle" system call */
115 .long sys_vm86old 115 .long ptregs_vm86old
116 .long sys_wait4 116 .long sys_wait4
117 .long sys_swapoff /* 115 */ 117 .long sys_swapoff /* 115 */
118 .long sys_sysinfo 118 .long sys_sysinfo
119 .long sys_ipc 119 .long sys_ipc
120 .long sys_fsync 120 .long sys_fsync
121 .long sys_sigreturn 121 .long ptregs_sigreturn
122 .long sys_clone /* 120 */ 122 .long ptregs_clone /* 120 */
123 .long sys_setdomainname 123 .long sys_setdomainname
124 .long sys_newuname 124 .long sys_newuname
125 .long sys_modify_ldt 125 .long sys_modify_ldt
@@ -165,14 +165,14 @@ ENTRY(sys_call_table)
165 .long sys_mremap 165 .long sys_mremap
166 .long sys_setresuid16 166 .long sys_setresuid16
167 .long sys_getresuid16 /* 165 */ 167 .long sys_getresuid16 /* 165 */
168 .long sys_vm86 168 .long ptregs_vm86
169 .long sys_ni_syscall /* Old sys_query_module */ 169 .long sys_ni_syscall /* Old sys_query_module */
170 .long sys_poll 170 .long sys_poll
171 .long sys_nfsservctl 171 .long sys_nfsservctl
172 .long sys_setresgid16 /* 170 */ 172 .long sys_setresgid16 /* 170 */
173 .long sys_getresgid16 173 .long sys_getresgid16
174 .long sys_prctl 174 .long sys_prctl
175 .long sys_rt_sigreturn 175 .long ptregs_rt_sigreturn
176 .long sys_rt_sigaction 176 .long sys_rt_sigaction
177 .long sys_rt_sigprocmask /* 175 */ 177 .long sys_rt_sigprocmask /* 175 */
178 .long sys_rt_sigpending 178 .long sys_rt_sigpending
@@ -185,11 +185,11 @@ ENTRY(sys_call_table)
185 .long sys_getcwd 185 .long sys_getcwd
186 .long sys_capget 186 .long sys_capget
187 .long sys_capset /* 185 */ 187 .long sys_capset /* 185 */
188 .long sys_sigaltstack 188 .long ptregs_sigaltstack
189 .long sys_sendfile 189 .long sys_sendfile
190 .long sys_ni_syscall /* reserved for streams1 */ 190 .long sys_ni_syscall /* reserved for streams1 */
191 .long sys_ni_syscall /* reserved for streams2 */ 191 .long sys_ni_syscall /* reserved for streams2 */
192 .long sys_vfork /* 190 */ 192 .long ptregs_vfork /* 190 */
193 .long sys_getrlimit 193 .long sys_getrlimit
194 .long sys_mmap2 194 .long sys_mmap2
195 .long sys_truncate64 195 .long sys_truncate64
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 3985cac0ed47..764c74e871f2 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -38,7 +38,7 @@
38#include <asm/time.h> 38#include <asm/time.h>
39#include <asm/timer.h> 39#include <asm/timer.h>
40 40
41#include "do_timer.h" 41#include <asm/do_timer.h>
42 42
43int timer_ack; 43int timer_ack;
44 44
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
deleted file mode 100644
index ce5054642247..000000000000
--- a/arch/x86/kernel/tlb_32.c
+++ /dev/null
@@ -1,256 +0,0 @@
1#include <linux/spinlock.h>
2#include <linux/cpu.h>
3#include <linux/interrupt.h>
4
5#include <asm/tlbflush.h>
6
7DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
8 ____cacheline_aligned = { &init_mm, 0, };
9
10/* must come after the send_IPI functions above for inlining */
11#include <mach_ipi.h>
12
13/*
14 * Smarter SMP flushing macros.
15 * c/o Linus Torvalds.
16 *
17 * These mean you can really definitely utterly forget about
18 * writing to user space from interrupts. (It's not allowed anyway.)
19 *
20 * Optimizations Manfred Spraul <manfred@colorfullife.com>
21 */
22
23static cpumask_t flush_cpumask;
24static struct mm_struct *flush_mm;
25static unsigned long flush_va;
26static DEFINE_SPINLOCK(tlbstate_lock);
27
28/*
29 * We cannot call mmdrop() because we are in interrupt context,
30 * instead update mm->cpu_vm_mask.
31 *
32 * We need to reload %cr3 since the page tables may be going
33 * away from under us.
34 */
35void leave_mm(int cpu)
36{
37 BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK);
38 cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask);
39 load_cr3(swapper_pg_dir);
40}
41EXPORT_SYMBOL_GPL(leave_mm);
42
43/*
44 *
45 * The flush IPI assumes that a thread switch happens in this order:
46 * [cpu0: the cpu that switches]
47 * 1) switch_mm() either 1a) or 1b)
48 * 1a) thread switch to a different mm
49 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
50 * Stop ipi delivery for the old mm. This is not synchronized with
51 * the other cpus, but smp_invalidate_interrupt ignore flush ipis
52 * for the wrong mm, and in the worst case we perform a superfluous
53 * tlb flush.
54 * 1a2) set cpu_tlbstate to TLBSTATE_OK
55 * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
56 * was in lazy tlb mode.
57 * 1a3) update cpu_tlbstate[].active_mm
58 * Now cpu0 accepts tlb flushes for the new mm.
59 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
60 * Now the other cpus will send tlb flush ipis.
61 * 1a5) change cr3.
62 * 1b) thread switch without mm change
63 * cpu_tlbstate[].active_mm is correct, cpu0 already handles
64 * flush ipis.
65 * 1b1) set cpu_tlbstate to TLBSTATE_OK
66 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
67 * Atomically set the bit [other cpus will start sending flush ipis],
68 * and test the bit.
69 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
70 * 2) switch %%esp, ie current
71 *
72 * The interrupt must handle 2 special cases:
73 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
74 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
75 * runs in kernel space, the cpu could load tlb entries for user space
76 * pages.
77 *
78 * The good news is that cpu_tlbstate is local to each cpu, no
79 * write/read ordering problems.
80 */
81
82/*
83 * TLB flush IPI:
84 *
85 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
86 * 2) Leave the mm if we are in the lazy tlb mode.
87 */
88
89void smp_invalidate_interrupt(struct pt_regs *regs)
90{
91 unsigned long cpu;
92
93 cpu = get_cpu();
94
95 if (!cpu_isset(cpu, flush_cpumask))
96 goto out;
97 /*
98 * This was a BUG() but until someone can quote me the
99 * line from the intel manual that guarantees an IPI to
100 * multiple CPUs is retried _only_ on the erroring CPUs
101 * its staying as a return
102 *
103 * BUG();
104 */
105
106 if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
107 if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
108 if (flush_va == TLB_FLUSH_ALL)
109 local_flush_tlb();
110 else
111 __flush_tlb_one(flush_va);
112 } else
113 leave_mm(cpu);
114 }
115 ack_APIC_irq();
116 smp_mb__before_clear_bit();
117 cpu_clear(cpu, flush_cpumask);
118 smp_mb__after_clear_bit();
119out:
120 put_cpu_no_resched();
121 inc_irq_stat(irq_tlb_count);
122}
123
124void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
125 unsigned long va)
126{
127 cpumask_t cpumask = *cpumaskp;
128
129 /*
130 * A couple of (to be removed) sanity checks:
131 *
132 * - current CPU must not be in mask
133 * - mask must exist :)
134 */
135 BUG_ON(cpus_empty(cpumask));
136 BUG_ON(cpu_isset(smp_processor_id(), cpumask));
137 BUG_ON(!mm);
138
139#ifdef CONFIG_HOTPLUG_CPU
140 /* If a CPU which we ran on has gone down, OK. */
141 cpus_and(cpumask, cpumask, cpu_online_map);
142 if (unlikely(cpus_empty(cpumask)))
143 return;
144#endif
145
146 /*
147 * I'm not happy about this global shared spinlock in the
148 * MM hot path, but we'll see how contended it is.
149 * AK: x86-64 has a faster method that could be ported.
150 */
151 spin_lock(&tlbstate_lock);
152
153 flush_mm = mm;
154 flush_va = va;
155 cpus_or(flush_cpumask, cpumask, flush_cpumask);
156
157 /*
158 * Make the above memory operations globally visible before
159 * sending the IPI.
160 */
161 smp_mb();
162 /*
163 * We have to send the IPI only to the
164 * affected CPUs.
165 */
166 send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);
167
168 while (!cpus_empty(flush_cpumask))
169 /* nothing. lockup detection does not belong here */
170 cpu_relax();
171
172 flush_mm = NULL;
173 flush_va = 0;
174 spin_unlock(&tlbstate_lock);
175}
176
177void flush_tlb_current_task(void)
178{
179 struct mm_struct *mm = current->mm;
180 cpumask_t cpu_mask;
181
182 preempt_disable();
183 cpu_mask = mm->cpu_vm_mask;
184 cpu_clear(smp_processor_id(), cpu_mask);
185
186 local_flush_tlb();
187 if (!cpus_empty(cpu_mask))
188 flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
189 preempt_enable();
190}
191
192void flush_tlb_mm(struct mm_struct *mm)
193{
194 cpumask_t cpu_mask;
195
196 preempt_disable();
197 cpu_mask = mm->cpu_vm_mask;
198 cpu_clear(smp_processor_id(), cpu_mask);
199
200 if (current->active_mm == mm) {
201 if (current->mm)
202 local_flush_tlb();
203 else
204 leave_mm(smp_processor_id());
205 }
206 if (!cpus_empty(cpu_mask))
207 flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
208
209 preempt_enable();
210}
211
212void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
213{
214 struct mm_struct *mm = vma->vm_mm;
215 cpumask_t cpu_mask;
216
217 preempt_disable();
218 cpu_mask = mm->cpu_vm_mask;
219 cpu_clear(smp_processor_id(), cpu_mask);
220
221 if (current->active_mm == mm) {
222 if (current->mm)
223 __flush_tlb_one(va);
224 else
225 leave_mm(smp_processor_id());
226 }
227
228 if (!cpus_empty(cpu_mask))
229 flush_tlb_others(cpu_mask, mm, va);
230
231 preempt_enable();
232}
233EXPORT_SYMBOL(flush_tlb_page);
234
235static void do_flush_tlb_all(void *info)
236{
237 unsigned long cpu = smp_processor_id();
238
239 __flush_tlb_all();
240 if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY)
241 leave_mm(cpu);
242}
243
244void flush_tlb_all(void)
245{
246 on_each_cpu(do_flush_tlb_all, NULL, 1);
247}
248
249void reset_lazy_tlbstate(void)
250{
251 int cpu = raw_smp_processor_id();
252
253 per_cpu(cpu_tlbstate, cpu).state = 0;
254 per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
255}
256
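/*
 * Illustrative sketch (not part of the patch): the per-cpu bookkeeping
 * the functions above consult, with field names as in asm/tlbflush.h of
 * this series.  TLBSTATE_OK means this cpu accepts flush IPIs for
 * active_mm; TLBSTATE_LAZY means the first IPI may make it leave_mm().
 */
struct tlb_state_sketch {
	struct mm_struct *active_mm;	/* mm whose page tables are loaded */
	int state;			/* TLBSTATE_OK or TLBSTATE_LAZY */
};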
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 6812b829ed83..f396e61bcb34 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -11,6 +11,7 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12 12
13#include <asm/mmu_context.h> 13#include <asm/mmu_context.h>
14#include <asm/uv/uv.h>
14#include <asm/uv/uv_mmrs.h> 15#include <asm/uv/uv_mmrs.h>
15#include <asm/uv/uv_hub.h> 16#include <asm/uv/uv_hub.h>
16#include <asm/uv/uv_bau.h> 17#include <asm/uv/uv_bau.h>
@@ -19,7 +20,7 @@
19#include <asm/tsc.h> 20#include <asm/tsc.h>
20#include <asm/irq_vectors.h> 21#include <asm/irq_vectors.h>
21 22
22#include <mach_apic.h> 23#include <asm/genapic.h>
23 24
24static struct bau_control **uv_bau_table_bases __read_mostly; 25static struct bau_control **uv_bau_table_bases __read_mostly;
25static int uv_bau_retry_limit __read_mostly; 26static int uv_bau_retry_limit __read_mostly;
@@ -210,14 +211,15 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
210 * 211 *
211 * Send a broadcast and wait for a broadcast message to complete. 212 * Send a broadcast and wait for a broadcast message to complete.
212 * 213 *
213 * The cpumaskp mask contains the cpus the broadcast was sent to. 214 * The flush_mask contains the cpus the broadcast was sent to.
214 * 215 *
215 * Returns 1 if all remote flushing was done. The mask is zeroed. 216 * Returns NULL if all remote flushing was done. The mask is zeroed.
216 * Returns 0 if some remote flushing remains to be done. The mask is left 217 * Returns @flush_mask if some remote flushing remains to be done. The
217 * unchanged. 218 * mask will have some bits still set.
218 */ 219 */
219int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc, 220const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
220 cpumask_t *cpumaskp) 221 struct bau_desc *bau_desc,
222 struct cpumask *flush_mask)
221{ 223{
222 int completion_status = 0; 224 int completion_status = 0;
223 int right_shift; 225 int right_shift;
@@ -257,66 +259,76 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
257 * the cpu's, all of which are still in the mask. 259 * the cpu's, all of which are still in the mask.
258 */ 260 */
259 __get_cpu_var(ptcstats).ptc_i++; 261 __get_cpu_var(ptcstats).ptc_i++;
260 return 0; 262 return flush_mask;
261 } 263 }
262 264
263 /* 265 /*
264 * Success, so clear the remote cpu's from the mask so we don't 266 * Success, so clear the remote cpu's from the mask so we don't
265 * use the IPI method of shootdown on them. 267 * use the IPI method of shootdown on them.
266 */ 268 */
267 for_each_cpu_mask(bit, *cpumaskp) { 269 for_each_cpu(bit, flush_mask) {
268 blade = uv_cpu_to_blade_id(bit); 270 blade = uv_cpu_to_blade_id(bit);
269 if (blade == this_blade) 271 if (blade == this_blade)
270 continue; 272 continue;
271 cpu_clear(bit, *cpumaskp); 273 cpumask_clear_cpu(bit, flush_mask);
272 } 274 }
273 if (!cpus_empty(*cpumaskp)) 275 if (!cpumask_empty(flush_mask))
274 return 0; 276 return flush_mask;
275 return 1; 277 return NULL;
276} 278}
277 279
278/** 280/**
279 * uv_flush_tlb_others - globally purge translation cache of a virtual 281 * uv_flush_tlb_others - globally purge translation cache of a virtual
280 * address or all TLB's 282 * address or all TLB's
281 * @cpumaskp: mask of all cpu's in which the address is to be removed 283 * @cpumask: mask of all cpu's in which the address is to be removed
282 * @mm: mm_struct containing virtual address range 284 * @mm: mm_struct containing virtual address range
283 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu) 285 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
286 * @cpu: the current cpu
284 * 287 *
285 * This is the entry point for initiating any UV global TLB shootdown. 288 * This is the entry point for initiating any UV global TLB shootdown.
286 * 289 *
287 * Purges the translation caches of all specified processors of the given 290 * Purges the translation caches of all specified processors of the given
288 * virtual address, or purges all TLB's on specified processors. 291 * virtual address, or purges all TLB's on specified processors.
289 * 292 *
290 * The caller has derived the cpumaskp from the mm_struct and has subtracted 293 * The caller has derived the cpumask from the mm_struct. This function
291 * the local cpu from the mask. This function is called only if there 294 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
292 * are bits set in the mask. (e.g. flush_tlb_page())
293 * 295 *
294 * The cpumaskp is converted into a nodemask of the nodes containing 296 * The cpumask is converted into a nodemask of the nodes containing
295 * the cpus. 297 * the cpus.
296 * 298 *
297 * Returns 1 if all remote flushing was done. 299 * Note that this function should be called with preemption disabled.
298 * Returns 0 if some remote flushing remains to be done. 300 *
301 * Returns NULL if all remote flushing was done.
302 * Returns pointer to cpumask if some remote flushing remains to be
303 * done. The returned pointer is valid till preemption is re-enabled.
299 */ 304 */
300int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm, 305const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
301 unsigned long va) 306 struct mm_struct *mm,
307 unsigned long va, unsigned int cpu)
302{ 308{
309 static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
310 struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask);
303 int i; 311 int i;
304 int bit; 312 int bit;
305 int blade; 313 int blade;
306 int cpu; 314 int uv_cpu;
307 int this_blade; 315 int this_blade;
308 int locals = 0; 316 int locals = 0;
309 struct bau_desc *bau_desc; 317 struct bau_desc *bau_desc;
310 318
311 cpu = uv_blade_processor_id(); 319 WARN_ON(!in_atomic());
320
321 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
322
323 uv_cpu = uv_blade_processor_id();
312 this_blade = uv_numa_blade_id(); 324 this_blade = uv_numa_blade_id();
313 bau_desc = __get_cpu_var(bau_control).descriptor_base; 325 bau_desc = __get_cpu_var(bau_control).descriptor_base;
314 bau_desc += UV_ITEMS_PER_DESCRIPTOR * cpu; 326 bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
315 327
316 bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); 328 bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
317 329
318 i = 0; 330 i = 0;
319 for_each_cpu_mask(bit, *cpumaskp) { 331 for_each_cpu(bit, flush_mask) {
320 blade = uv_cpu_to_blade_id(bit); 332 blade = uv_cpu_to_blade_id(bit);
321 BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1)); 333 BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
322 if (blade == this_blade) { 334 if (blade == this_blade) {
@@ -331,17 +343,17 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
331 * no off_node flushing; return status for local node 343 * no off_node flushing; return status for local node
332 */ 344 */
333 if (locals) 345 if (locals)
334 return 0; 346 return flush_mask;
335 else 347 else
336 return 1; 348 return NULL;
337 } 349 }
338 __get_cpu_var(ptcstats).requestor++; 350 __get_cpu_var(ptcstats).requestor++;
339 __get_cpu_var(ptcstats).ntargeted += i; 351 __get_cpu_var(ptcstats).ntargeted += i;
340 352
341 bau_desc->payload.address = va; 353 bau_desc->payload.address = va;
342 bau_desc->payload.sending_cpu = smp_processor_id(); 354 bau_desc->payload.sending_cpu = cpu;
343 355
344 return uv_flush_send_and_wait(cpu, this_blade, bau_desc, cpumaskp); 356 return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
345} 357}
346 358
347/* 359/*
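The hunks above change uv_flush_tlb_others() from a 0/1 result to a
mask-returning contract, so a caller no longer needs its own copy of the
cpumask. A minimal caller sketch under the new contract, with preemption
disabled as the updated comment requires; the IPI fallback name is
hypothetical, for illustration only:

/* hypothetical fallback used only by this sketch */
static void flush_tlb_others_ipi_sketch(const struct cpumask *mask,
					struct mm_struct *mm, unsigned long va);

static void flush_remote_sketch(const struct cpumask *cpumask,
				struct mm_struct *mm, unsigned long va)
{
	const struct cpumask *remain;

	/* preemption is disabled; the returned mask is per-cpu storage */
	remain = uv_flush_tlb_others(cpumask, mm, va, smp_processor_id());
	if (remain)
		/* the BAU could not reach every cpu: fall back to IPIs */
		flush_tlb_others_ipi_sketch(remain, mm, va);
}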
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index 894293c598db..95a012a4664e 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -29,6 +29,7 @@
29#include <asm/page.h> 29#include <asm/page.h>
30#include <asm/msr.h> 30#include <asm/msr.h>
31#include <asm/segment.h> 31#include <asm/segment.h>
32#include <asm/processor-flags.h>
32 33
33.section .rodata, "a", @progbits 34.section .rodata, "a", @progbits
34 35
@@ -37,7 +38,7 @@
37ENTRY(trampoline_data) 38ENTRY(trampoline_data)
38r_base = . 39r_base = .
39 cli # We should be safe anyway 40 cli # We should be safe anyway
40 wbinvd 41 wbinvd
41 mov %cs, %ax # Code and data in the same place 42 mov %cs, %ax # Code and data in the same place
42 mov %ax, %ds 43 mov %ax, %ds
43 mov %ax, %es 44 mov %ax, %es
@@ -73,9 +74,8 @@ r_base = .
73 lidtl tidt - r_base # load idt with 0, 0 74 lidtl tidt - r_base # load idt with 0, 0
74 lgdtl tgdt - r_base # load gdt with whatever is appropriate 75 lgdtl tgdt - r_base # load gdt with whatever is appropriate
75 76
76 xor %ax, %ax 77 mov $X86_CR0_PE, %ax # protected mode (PE) bit
77 inc %ax # protected mode (PE) bit 78 lmsw %ax # into protected mode
78 lmsw %ax # into protected mode
79 79
80 # flush prefetch and jump to startup_32 80 # flush prefetch and jump to startup_32
81 ljmpl *(startup_32_vector - r_base) 81 ljmpl *(startup_32_vector - r_base)
@@ -86,9 +86,8 @@ startup_32:
86 movl $__KERNEL_DS, %eax # Initialize the %ds segment register 86 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
87 movl %eax, %ds 87 movl %eax, %ds
88 88
89 xorl %eax, %eax 89 movl $X86_CR4_PAE, %eax
90 btsl $5, %eax # Enable PAE mode 90 movl %eax, %cr4 # Enable PAE mode
91 movl %eax, %cr4
92 91
93 # Setup trampoline 4 level pagetables 92 # Setup trampoline 4 level pagetables
94 leal (trampoline_level4_pgt - r_base)(%esi), %eax 93 leal (trampoline_level4_pgt - r_base)(%esi), %eax
@@ -99,9 +98,9 @@ startup_32:
99 xorl %edx, %edx 98 xorl %edx, %edx
100 wrmsr 99 wrmsr
101 100
102 xorl %eax, %eax 101 # Enable paging and in turn activate Long Mode
103 btsl $31, %eax # Enable paging and in turn activate Long Mode 102 # Enable protected mode
104 btsl $0, %eax # Enable protected mode 103 movl $(X86_CR0_PG | X86_CR0_PE), %eax
105 movl %eax, %cr0 104 movl %eax, %cr0
106 105
107 /* 106 /*
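The trampoline hunks replace hand-built register values with the named
constants from <asm/processor-flags.h>. The equivalence is easy to
check; the values below are the architectural bit positions (a
standalone sanity check, not kernel code):

#include <stdio.h>

#define X86_CR0_PE  0x00000001UL	/* protection enable, bit 0 */
#define X86_CR4_PAE 0x00000020UL	/* physical address extension, bit 5 */
#define X86_CR0_PG  0x80000000UL	/* paging, bit 31 */

int main(void)
{
	printf("%#lx\n", X86_CR0_PE);		   /* xor/inc %ax built 0x1 */
	printf("%#lx\n", X86_CR4_PAE);		   /* btsl $5 built 0x20 */
	printf("%#lx\n", X86_CR0_PG | X86_CR0_PE); /* btsl $31 + btsl $0 */
	return 0;
}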
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 7932338d7cb3..bde57f0f1616 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -54,12 +54,11 @@
54#include <asm/desc.h> 54#include <asm/desc.h>
55#include <asm/i387.h> 55#include <asm/i387.h>
56 56
57#include <mach_traps.h> 57#include <asm/mach_traps.h>
58 58
59#ifdef CONFIG_X86_64 59#ifdef CONFIG_X86_64
60#include <asm/pgalloc.h> 60#include <asm/pgalloc.h>
61#include <asm/proto.h> 61#include <asm/proto.h>
62#include <asm/pda.h>
63#else 62#else
64#include <asm/processor-flags.h> 63#include <asm/processor-flags.h>
65#include <asm/arch_hooks.h> 64#include <asm/arch_hooks.h>
@@ -906,19 +905,20 @@ void math_emulate(struct math_emu_info *info)
906} 905}
907#endif /* CONFIG_MATH_EMULATION */ 906#endif /* CONFIG_MATH_EMULATION */
908 907
909dotraplinkage void __kprobes do_device_not_available(struct pt_regs regs) 908dotraplinkage void __kprobes
909do_device_not_available(struct pt_regs *regs, long error_code)
910{ 910{
911#ifdef CONFIG_X86_32 911#ifdef CONFIG_X86_32
912 if (read_cr0() & X86_CR0_EM) { 912 if (read_cr0() & X86_CR0_EM) {
913 struct math_emu_info info = { }; 913 struct math_emu_info info = { };
914 914
915 conditional_sti(&regs); 915 conditional_sti(regs);
916 916
917 info.regs = &regs; 917 info.regs = regs;
918 math_emulate(&info); 918 math_emulate(&info);
919 } else { 919 } else {
920 math_state_restore(); /* interrupts still off */ 920 math_state_restore(); /* interrupts still off */
921 conditional_sti(&regs); 921 conditional_sti(regs);
922 } 922 }
923#else 923#else
924 math_state_restore(); 924 math_state_restore();
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 599e58168631..83d53ce5d4c4 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -773,7 +773,7 @@ __cpuinit int unsynchronized_tsc(void)
773 if (!cpu_has_tsc || tsc_unstable) 773 if (!cpu_has_tsc || tsc_unstable)
774 return 1; 774 return 1;
775 775
776#ifdef CONFIG_X86_SMP 776#ifdef CONFIG_SMP
777 if (apic_is_clustered_box()) 777 if (apic_is_clustered_box())
778 return 1; 778 return 1;
779#endif 779#endif
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index d801d06af068..4fd646e6dd43 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -32,9 +32,9 @@
32#include <asm/e820.h> 32#include <asm/e820.h>
33#include <asm/io.h> 33#include <asm/io.h>
34 34
35#include <mach_ipi.h> 35#include <asm/genapic.h>
36 36
37#include "mach_apic.h" 37#include <asm/genapic.h>
38 38
39#include <linux/kernel_stat.h> 39#include <linux/kernel_stat.h>
40 40
@@ -200,7 +200,7 @@ static void __init MP_processor_info(struct mpc_cpu *m)
200 return; 200 return;
201 } 201 }
202 202
203 apic_cpus = apicid_to_cpu_present(m->apicid); 203 apic_cpus = apic->apicid_to_cpu_present(m->apicid);
204 physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus); 204 physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus);
205 /* 205 /*
206 * Validate version 206 * Validate version
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 4eeb5cf9720d..d7ac84e7fc1c 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -158,7 +158,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
158 ret = KVM86->regs32; 158 ret = KVM86->regs32;
159 159
160 ret->fs = current->thread.saved_fs; 160 ret->fs = current->thread.saved_fs;
161 loadsegment(gs, current->thread.saved_gs); 161 set_user_gs(ret, current->thread.saved_gs);
162 162
163 return ret; 163 return ret;
164} 164}
@@ -197,9 +197,9 @@ out:
197static int do_vm86_irq_handling(int subfunction, int irqnumber); 197static int do_vm86_irq_handling(int subfunction, int irqnumber);
198static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk); 198static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
199 199
200asmlinkage int sys_vm86old(struct pt_regs regs) 200int sys_vm86old(struct pt_regs *regs)
201{ 201{
202 struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx; 202 struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs->bx;
203 struct kernel_vm86_struct info; /* declare this _on top_, 203 struct kernel_vm86_struct info; /* declare this _on top_,
204 * this avoids wasting of stack space. 204 * this avoids wasting of stack space.
205 * This remains on the stack until we 205 * This remains on the stack until we
@@ -218,7 +218,7 @@ asmlinkage int sys_vm86old(struct pt_regs regs)
218 if (tmp) 218 if (tmp)
219 goto out; 219 goto out;
220 memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus); 220 memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
221 info.regs32 = &regs; 221 info.regs32 = regs;
222 tsk->thread.vm86_info = v86; 222 tsk->thread.vm86_info = v86;
223 do_sys_vm86(&info, tsk); 223 do_sys_vm86(&info, tsk);
224 ret = 0; /* we never return here */ 224 ret = 0; /* we never return here */
@@ -227,7 +227,7 @@ out:
227} 227}
228 228
229 229
230asmlinkage int sys_vm86(struct pt_regs regs) 230int sys_vm86(struct pt_regs *regs)
231{ 231{
232 struct kernel_vm86_struct info; /* declare this _on top_, 232 struct kernel_vm86_struct info; /* declare this _on top_,
233 * this avoids wasting of stack space. 233 * this avoids wasting of stack space.
@@ -239,12 +239,12 @@ asmlinkage int sys_vm86(struct pt_regs regs)
239 struct vm86plus_struct __user *v86; 239 struct vm86plus_struct __user *v86;
240 240
241 tsk = current; 241 tsk = current;
242 switch (regs.bx) { 242 switch (regs->bx) {
243 case VM86_REQUEST_IRQ: 243 case VM86_REQUEST_IRQ:
244 case VM86_FREE_IRQ: 244 case VM86_FREE_IRQ:
245 case VM86_GET_IRQ_BITS: 245 case VM86_GET_IRQ_BITS:
246 case VM86_GET_AND_RESET_IRQ: 246 case VM86_GET_AND_RESET_IRQ:
247 ret = do_vm86_irq_handling(regs.bx, (int)regs.cx); 247 ret = do_vm86_irq_handling(regs->bx, (int)regs->cx);
248 goto out; 248 goto out;
249 case VM86_PLUS_INSTALL_CHECK: 249 case VM86_PLUS_INSTALL_CHECK:
250 /* 250 /*
@@ -261,14 +261,14 @@ asmlinkage int sys_vm86(struct pt_regs regs)
261 ret = -EPERM; 261 ret = -EPERM;
262 if (tsk->thread.saved_sp0) 262 if (tsk->thread.saved_sp0)
263 goto out; 263 goto out;
264 v86 = (struct vm86plus_struct __user *)regs.cx; 264 v86 = (struct vm86plus_struct __user *)regs->cx;
265 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, 265 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
266 offsetof(struct kernel_vm86_struct, regs32) - 266 offsetof(struct kernel_vm86_struct, regs32) -
267 sizeof(info.regs)); 267 sizeof(info.regs));
268 ret = -EFAULT; 268 ret = -EFAULT;
269 if (tmp) 269 if (tmp)
270 goto out; 270 goto out;
271 info.regs32 = &regs; 271 info.regs32 = regs;
272 info.vm86plus.is_vm86pus = 1; 272 info.vm86plus.is_vm86pus = 1;
273 tsk->thread.vm86_info = (struct vm86_struct __user *)v86; 273 tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
274 do_sys_vm86(&info, tsk); 274 do_sys_vm86(&info, tsk);
@@ -323,7 +323,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
323 info->regs32->ax = 0; 323 info->regs32->ax = 0;
324 tsk->thread.saved_sp0 = tsk->thread.sp0; 324 tsk->thread.saved_sp0 = tsk->thread.sp0;
325 tsk->thread.saved_fs = info->regs32->fs; 325 tsk->thread.saved_fs = info->regs32->fs;
326 savesegment(gs, tsk->thread.saved_gs); 326 tsk->thread.saved_gs = get_user_gs(info->regs32);
327 327
328 tss = &per_cpu(init_tss, get_cpu()); 328 tss = &per_cpu(init_tss, get_cpu());
329 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0; 329 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
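The vm86 hunks continue the conversion seen in do_device_not_available()
above: entry points stop taking struct pt_regs by value (a stack copy)
and receive a pointer to the saved frame instead, so register writes
reach the frame that is actually restored on return. In sketch form
(sys_example is a made-up handler, not from the patch):

/* before: asmlinkage int sys_example(struct pt_regs regs) -- a copy */
int sys_example(struct pt_regs *regs)
{
	regs->ax = 0;	/* updates the frame restored on kernel exit */
	return 0;
}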
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index bef58b4982db..f052c84ecbe4 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -680,10 +680,11 @@ static inline int __init activate_vmi(void)
680 para_fill(pv_mmu_ops.write_cr2, SetCR2); 680 para_fill(pv_mmu_ops.write_cr2, SetCR2);
681 para_fill(pv_mmu_ops.write_cr3, SetCR3); 681 para_fill(pv_mmu_ops.write_cr3, SetCR3);
682 para_fill(pv_cpu_ops.write_cr4, SetCR4); 682 para_fill(pv_cpu_ops.write_cr4, SetCR4);
683 para_fill(pv_irq_ops.save_fl, GetInterruptMask); 683
684 para_fill(pv_irq_ops.restore_fl, SetInterruptMask); 684 para_fill(pv_irq_ops.save_fl.func, GetInterruptMask);
685 para_fill(pv_irq_ops.irq_disable, DisableInterrupts); 685 para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask);
686 para_fill(pv_irq_ops.irq_enable, EnableInterrupts); 686 para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts);
687 para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts);
687 688
688 para_fill(pv_cpu_ops.wbinvd, WBINVD); 689 para_fill(pv_cpu_ops.wbinvd, WBINVD);
689 para_fill(pv_cpu_ops.read_tsc, RDTSC); 690 para_fill(pv_cpu_ops.read_tsc, RDTSC);
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index c4c1f9e09402..a4791ef412d1 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -256,7 +256,7 @@ void __devinit vmi_time_bsp_init(void)
256 */ 256 */
257 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); 257 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
258 local_irq_disable(); 258 local_irq_disable();
259#ifdef CONFIG_X86_SMP 259#ifdef CONFIG_SMP
260 /* 260 /*
261 * XXX handle_percpu_irq only defined for SMP; we need to switch over 261 * XXX handle_percpu_irq only defined for SMP; we need to switch over
262 * to using it, since this is a local interrupt, which each CPU must 262 * to using it, since this is a local interrupt, which each CPU must
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index 82c67559dde7..3eba7f7bac05 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -178,14 +178,7 @@ SECTIONS
178 __initramfs_end = .; 178 __initramfs_end = .;
179 } 179 }
180#endif 180#endif
181 . = ALIGN(PAGE_SIZE); 181 PERCPU(PAGE_SIZE)
182 .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
183 __per_cpu_start = .;
184 *(.data.percpu.page_aligned)
185 *(.data.percpu)
186 *(.data.percpu.shared_aligned)
187 __per_cpu_end = .;
188 }
189 . = ALIGN(PAGE_SIZE); 182 . = ALIGN(PAGE_SIZE);
190 /* freed after init ends here */ 183 /* freed after init ends here */
191 184
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index 1a614c0e6bef..087a7f2c639b 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -5,6 +5,7 @@
5#define LOAD_OFFSET __START_KERNEL_map 5#define LOAD_OFFSET __START_KERNEL_map
6 6
7#include <asm-generic/vmlinux.lds.h> 7#include <asm-generic/vmlinux.lds.h>
8#include <asm/asm-offsets.h>
8#include <asm/page.h> 9#include <asm/page.h>
9 10
10#undef i386 /* in case the preprocessor is a 32bit one */ 11#undef i386 /* in case the preprocessor is a 32bit one */
@@ -13,12 +14,15 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
13OUTPUT_ARCH(i386:x86-64) 14OUTPUT_ARCH(i386:x86-64)
14ENTRY(phys_startup_64) 15ENTRY(phys_startup_64)
15jiffies_64 = jiffies; 16jiffies_64 = jiffies;
16_proxy_pda = 1;
17PHDRS { 17PHDRS {
18 text PT_LOAD FLAGS(5); /* R_E */ 18 text PT_LOAD FLAGS(5); /* R_E */
19 data PT_LOAD FLAGS(7); /* RWE */ 19 data PT_LOAD FLAGS(7); /* RWE */
20 user PT_LOAD FLAGS(7); /* RWE */ 20 user PT_LOAD FLAGS(7); /* RWE */
21 data.init PT_LOAD FLAGS(7); /* RWE */ 21 data.init PT_LOAD FLAGS(7); /* RWE */
22#ifdef CONFIG_SMP
23 percpu PT_LOAD FLAGS(7); /* RWE */
24#endif
25 data.init2 PT_LOAD FLAGS(7); /* RWE */
22 note PT_NOTE FLAGS(0); /* ___ */ 26 note PT_NOTE FLAGS(0); /* ___ */
23} 27}
24SECTIONS 28SECTIONS
@@ -208,14 +212,28 @@ SECTIONS
208 __initramfs_end = .; 212 __initramfs_end = .;
209#endif 213#endif
210 214
215#ifdef CONFIG_SMP
216 /*
217 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
218 * output PHDR, so the next output section - __data_nosave - should
219 * start another section data.init2. Also, pda should be at the head of
220 * percpu area. Preallocate it and define the percpu offset symbol
221 * so that it can be accessed as a percpu variable.
222 */
223 . = ALIGN(PAGE_SIZE);
224 PERCPU_VADDR(0, :percpu)
225#else
211 PERCPU(PAGE_SIZE) 226 PERCPU(PAGE_SIZE)
227#endif
212 228
213 . = ALIGN(PAGE_SIZE); 229 . = ALIGN(PAGE_SIZE);
214 __init_end = .; 230 __init_end = .;
215 231
216 . = ALIGN(PAGE_SIZE); 232 . = ALIGN(PAGE_SIZE);
217 __nosave_begin = .; 233 __nosave_begin = .;
218 .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) } 234 .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
235 *(.data.nosave)
236 } :data.init2 /* use another section data.init2, see PERCPU_VADDR() above */
219 . = ALIGN(PAGE_SIZE); 237 . = ALIGN(PAGE_SIZE);
220 __nosave_end = .; 238 __nosave_end = .;
221 239
@@ -239,8 +257,21 @@ SECTIONS
239 DWARF_DEBUG 257 DWARF_DEBUG
240} 258}
241 259
260 /*
261 * Per-cpu symbols which need to be offset from __per_cpu_load
262 * for the boot processor.
263 */
264#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
265INIT_PER_CPU(gdt_page);
266INIT_PER_CPU(irq_stack_union);
267
242/* 268/*
243 * Build-time check on the image size: 269 * Build-time check on the image size:
244 */ 270 */
245ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), 271ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
246 "kernel image bigger than KERNEL_IMAGE_SIZE") 272 "kernel image bigger than KERNEL_IMAGE_SIZE")
273
274#ifdef CONFIG_SMP
275ASSERT((per_cpu__irq_stack_union == 0),
276 "irq_stack_union is not at start of per-cpu area");
277#endif
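With the zero-based percpu layout above, a per-cpu symbol's link address
is simply its offset into the area, and the INIT_PER_CPU() aliases name
the boot cpu's copy at __per_cpu_load. The same arithmetic in C (the
per_cpu__ prefix follows this series' symbol naming; the macro is
illustrative only):

extern char __per_cpu_load[];

/* hypothetical helper: address of the boot cpu's copy of a percpu var */
#define BOOT_COPY_OF(var) \
	((void *)(__per_cpu_load + (unsigned long)&per_cpu__##var))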
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index a688f3bfaec2..c609205df594 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -37,6 +37,7 @@ static unsigned long vsmp_save_fl(void)
37 flags &= ~X86_EFLAGS_IF; 37 flags &= ~X86_EFLAGS_IF;
38 return flags; 38 return flags;
39} 39}
40PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
40 41
41static void vsmp_restore_fl(unsigned long flags) 42static void vsmp_restore_fl(unsigned long flags)
42{ 43{
@@ -46,6 +47,7 @@ static void vsmp_restore_fl(unsigned long flags)
46 flags |= X86_EFLAGS_AC; 47 flags |= X86_EFLAGS_AC;
47 native_restore_fl(flags); 48 native_restore_fl(flags);
48} 49}
50PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
49 51
50static void vsmp_irq_disable(void) 52static void vsmp_irq_disable(void)
51{ 53{
@@ -53,6 +55,7 @@ static void vsmp_irq_disable(void)
53 55
54 native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC); 56 native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
55} 57}
58PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
56 59
57static void vsmp_irq_enable(void) 60static void vsmp_irq_enable(void)
58{ 61{
@@ -60,6 +63,7 @@ static void vsmp_irq_enable(void)
60 63
61 native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); 64 native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
62} 65}
66PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
63 67
64static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf, 68static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
65 unsigned long addr, unsigned len) 69 unsigned long addr, unsigned len)
@@ -90,10 +94,10 @@ static void __init set_vsmp_pv_ops(void)
90 cap, ctl); 94 cap, ctl);
91 if (cap & ctl & (1 << 4)) { 95 if (cap & ctl & (1 << 4)) {
92 /* Setup irq ops and turn on vSMP IRQ fastpath handling */ 96 /* Setup irq ops and turn on vSMP IRQ fastpath handling */
93 pv_irq_ops.irq_disable = vsmp_irq_disable; 97 pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
94 pv_irq_ops.irq_enable = vsmp_irq_enable; 98 pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
95 pv_irq_ops.save_fl = vsmp_save_fl; 99 pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
96 pv_irq_ops.restore_fl = vsmp_restore_fl; 100 pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
97 pv_init_ops.patch = vsmp_patch; 101 pv_init_ops.patch = vsmp_patch;
98 102
99 ctl &= ~(1 << 4); 103 ctl &= ~(1 << 4);
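The same wrapping recurs in the lguest hunks below: each irq callback
gets a register-preserving asm thunk via PV_CALLEE_SAVE_REGS_THUNK() and
is installed wrapped in PV_CALLEE_SAVE(), which builds the
struct paravirt_callee_save pointing at that thunk. A condensed sketch
of the pattern (my_save_fl is a made-up example, not from the patch):

static unsigned long my_save_fl(void)
{
	return native_save_fl();
}
PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);

static void __init my_setup_pv_ops(void)
{
	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
}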
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 695e426aa354..3909e3ba5ce3 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -58,5 +58,3 @@ EXPORT_SYMBOL(__memcpy);
58EXPORT_SYMBOL(empty_zero_page); 58EXPORT_SYMBOL(empty_zero_page);
59EXPORT_SYMBOL(init_level4_pgt); 59EXPORT_SYMBOL(init_level4_pgt);
60EXPORT_SYMBOL(load_gs_index); 60EXPORT_SYMBOL(load_gs_index);
61
62EXPORT_SYMBOL(_proxy_pda);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 92f1c6f3e19d..da2e314f61b5 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -173,24 +173,29 @@ static unsigned long save_fl(void)
173{ 173{
174 return lguest_data.irq_enabled; 174 return lguest_data.irq_enabled;
175} 175}
176PV_CALLEE_SAVE_REGS_THUNK(save_fl);
176 177
177/* restore_flags() just sets the flags back to the value given. */ 178/* restore_flags() just sets the flags back to the value given. */
178static void restore_fl(unsigned long flags) 179static void restore_fl(unsigned long flags)
179{ 180{
180 lguest_data.irq_enabled = flags; 181 lguest_data.irq_enabled = flags;
181} 182}
183PV_CALLEE_SAVE_REGS_THUNK(restore_fl);
182 184
183/* Interrupts go off... */ 185/* Interrupts go off... */
184static void irq_disable(void) 186static void irq_disable(void)
185{ 187{
186 lguest_data.irq_enabled = 0; 188 lguest_data.irq_enabled = 0;
187} 189}
190PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
188 191
189/* Interrupts go on... */ 192/* Interrupts go on... */
190static void irq_enable(void) 193static void irq_enable(void)
191{ 194{
192 lguest_data.irq_enabled = X86_EFLAGS_IF; 195 lguest_data.irq_enabled = X86_EFLAGS_IF;
193} 196}
197PV_CALLEE_SAVE_REGS_THUNK(irq_enable);
198
194/*:*/ 199/*:*/
195/*M:003 Note that we don't check for outstanding interrupts when we re-enable 200/*M:003 Note that we don't check for outstanding interrupts when we re-enable
196 * them (or when we unmask an interrupt). This seems to work for the moment, 201 * them (or when we unmask an interrupt). This seems to work for the moment,
@@ -278,7 +283,7 @@ static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
278 /* There's one problem which normal hardware doesn't have: the Host 283 /* There's one problem which normal hardware doesn't have: the Host
279 * can't handle us removing entries we're currently using. So we clear 284 * can't handle us removing entries we're currently using. So we clear
280 * the GS register here: if it's needed it'll be reloaded anyway. */ 285 * the GS register here: if it's needed it'll be reloaded anyway. */
281 loadsegment(gs, 0); 286 lazy_load_gs(0);
282 lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0); 287 lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0);
283} 288}
284 289
@@ -984,10 +989,10 @@ __init void lguest_init(void)
984 989
985 /* interrupt-related operations */ 990 /* interrupt-related operations */
986 pv_irq_ops.init_IRQ = lguest_init_IRQ; 991 pv_irq_ops.init_IRQ = lguest_init_IRQ;
987 pv_irq_ops.save_fl = save_fl; 992 pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
988 pv_irq_ops.restore_fl = restore_fl; 993 pv_irq_ops.restore_fl = PV_CALLEE_SAVE(restore_fl);
989 pv_irq_ops.irq_disable = irq_disable; 994 pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
990 pv_irq_ops.irq_enable = irq_enable; 995 pv_irq_ops.irq_enable = PV_CALLEE_SAVE(irq_enable);
991 pv_irq_ops.safe_halt = lguest_safe_halt; 996 pv_irq_ops.safe_halt = lguest_safe_halt;
992 997
993 /* init-time operations */ 998 /* init-time operations */
diff --git a/arch/x86/mach-default/Makefile b/arch/x86/mach-default/Makefile
deleted file mode 100644
index 012fe34459e6..000000000000
--- a/arch/x86/mach-default/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1#
2# Makefile for the linux kernel.
3#
4
5obj-y := setup.o
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
deleted file mode 100644
index a265a7c63190..000000000000
--- a/arch/x86/mach-default/setup.c
+++ /dev/null
@@ -1,174 +0,0 @@
1/*
2 * Machine specific setup for generic
3 */
4
5#include <linux/smp.h>
6#include <linux/init.h>
7#include <linux/interrupt.h>
8#include <asm/acpi.h>
9#include <asm/arch_hooks.h>
10#include <asm/e820.h>
11#include <asm/setup.h>
12
13#include <mach_ipi.h>
14
15#ifdef CONFIG_HOTPLUG_CPU
16#define DEFAULT_SEND_IPI (1)
17#else
18#define DEFAULT_SEND_IPI (0)
19#endif
20
21int no_broadcast = DEFAULT_SEND_IPI;
22
23/**
24 * pre_intr_init_hook - initialisation prior to setting up interrupt vectors
25 *
26 * Description:
27 * Perform any necessary interrupt initialisation prior to setting up
28 * the "ordinary" interrupt call gates. For legacy reasons, the ISA
29 * interrupts should be initialised here if the machine emulates a PC
30 * in any way.
31 **/
32void __init pre_intr_init_hook(void)
33{
34 if (x86_quirks->arch_pre_intr_init) {
35 if (x86_quirks->arch_pre_intr_init())
36 return;
37 }
38 init_ISA_irqs();
39}
40
41/*
42 * IRQ2 is cascade interrupt to second interrupt controller
43 */
44static struct irqaction irq2 = {
45 .handler = no_action,
46 .mask = CPU_MASK_NONE,
47 .name = "cascade",
48};
49
50/**
51 * intr_init_hook - post gate setup interrupt initialisation
52 *
53 * Description:
54 * Fill in any interrupts that may have been left out by the general
55 * init_IRQ() routine. Interrupts having to do with the machine rather
56 * than the devices on the I/O bus (like APIC interrupts in intel MP
57 * systems) are started here.
58 **/
59void __init intr_init_hook(void)
60{
61 if (x86_quirks->arch_intr_init) {
62 if (x86_quirks->arch_intr_init())
63 return;
64 }
65 if (!acpi_ioapic)
66 setup_irq(2, &irq2);
67
68}
69
70/**
71 * pre_setup_arch_hook - hook called prior to any setup_arch() execution
72 *
73 * Description:
74 * generally used to activate any machine specific identification
75 * routines that may be needed before setup_arch() runs. On Voyager
76 * this is used to get the board revision and type.
77 **/
78void __init pre_setup_arch_hook(void)
79{
80}
81
82/**
83 * trap_init_hook - initialise system specific traps
84 *
85 * Description:
86 * Called as the final act of trap_init(). Used in VISWS to initialise
87 * the various board specific APIC traps.
88 **/
89void __init trap_init_hook(void)
90{
91 if (x86_quirks->arch_trap_init) {
92 if (x86_quirks->arch_trap_init())
93 return;
94 }
95}
96
97static struct irqaction irq0 = {
98 .handler = timer_interrupt,
99 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL,
100 .mask = CPU_MASK_NONE,
101 .name = "timer"
102};
103
104/**
105 * pre_time_init_hook - do any specific initialisations before.
106 *
107 **/
108void __init pre_time_init_hook(void)
109{
110 if (x86_quirks->arch_pre_time_init)
111 x86_quirks->arch_pre_time_init();
112}
113
114/**
115 * time_init_hook - do any specific initialisations for the system timer.
116 *
117 * Description:
118 * Must plug the system timer interrupt source at HZ into the IRQ listed
119 * in irq_vectors.h:TIMER_IRQ
120 **/
121void __init time_init_hook(void)
122{
123 if (x86_quirks->arch_time_init) {
124 /*
125 * A nonzero return code does not mean failure, it means
126 * that the architecture quirk does not want any
127 * generic (timer) setup to be performed after this:
128 */
129 if (x86_quirks->arch_time_init())
130 return;
131 }
132
133 irq0.mask = cpumask_of_cpu(0);
134 setup_irq(0, &irq0);
135}
136
137#ifdef CONFIG_MCA
138/**
139 * mca_nmi_hook - hook into MCA specific NMI chain
140 *
141 * Description:
142 * The MCA (Microchannel Architecture) has an NMI chain for NMI sources
143 * along the MCA bus. Use this to hook into that chain if you will need
144 * it.
145 **/
146void mca_nmi_hook(void)
147{
148 /*
149 * If I recall correctly, there's a whole bunch of other things that
150 * we can do to check for NMI problems, but that's all I know about
151 * at the moment.
152 */
153 pr_warning("NMI generated from unknown source!\n");
154}
155#endif
156
157static __init int no_ipi_broadcast(char *str)
158{
159 get_option(&str, &no_broadcast);
160 pr_info("Using %s mode\n",
161 no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
162 return 1;
163}
164__setup("no_ipi_broadcast=", no_ipi_broadcast);
165
166static int __init print_ipi_mode(void)
167{
168 pr_info("Using IPI %s mode\n",
169 no_broadcast ? "No-Shortcut" : "Shortcut");
170 return 0;
171}
172
173late_initcall(print_ipi_mode);
174
diff --git a/arch/x86/mach-generic/Makefile b/arch/x86/mach-generic/Makefile
deleted file mode 100644
index 6730f4e7c744..000000000000
--- a/arch/x86/mach-generic/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
1#
2# Makefile for the generic architecture
3#
4
5EXTRA_CFLAGS := -Iarch/x86/kernel
6
7obj-y := probe.o default.o
8obj-$(CONFIG_X86_NUMAQ) += numaq.o
9obj-$(CONFIG_X86_SUMMIT) += summit.o
10obj-$(CONFIG_X86_BIGSMP) += bigsmp.o
11obj-$(CONFIG_X86_ES7000) += es7000.o
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c
deleted file mode 100644
index bc4c7840b2a8..000000000000
--- a/arch/x86/mach-generic/bigsmp.c
+++ /dev/null
@@ -1,60 +0,0 @@
1/*
2 * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs.
3 * Drives the local APIC in "clustered mode".
4 */
5#define APIC_DEFINITION 1
6#include <linux/threads.h>
7#include <linux/cpumask.h>
8#include <asm/mpspec.h>
9#include <asm/genapic.h>
10#include <asm/fixmap.h>
11#include <asm/apicdef.h>
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/dmi.h>
15#include <asm/bigsmp/apicdef.h>
16#include <linux/smp.h>
17#include <asm/bigsmp/apic.h>
18#include <asm/bigsmp/ipi.h>
19#include <asm/mach-default/mach_mpparse.h>
20#include <asm/mach-default/mach_wakecpu.h>
21
22static int dmi_bigsmp; /* can be set by dmi scanners */
23
24static int hp_ht_bigsmp(const struct dmi_system_id *d)
25{
26 printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
27 dmi_bigsmp = 1;
28 return 0;
29}
30
31
32static const struct dmi_system_id bigsmp_dmi_table[] = {
33 { hp_ht_bigsmp, "HP ProLiant DL760 G2",
34 { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
35 DMI_MATCH(DMI_BIOS_VERSION, "P44-"),}
36 },
37
38 { hp_ht_bigsmp, "HP ProLiant DL740",
39 { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
40 DMI_MATCH(DMI_BIOS_VERSION, "P47-"),}
41 },
42 { }
43};
44
45static void vector_allocation_domain(int cpu, cpumask_t *retmask)
46{
47 cpus_clear(*retmask);
48 cpu_set(cpu, *retmask);
49}
50
51static int probe_bigsmp(void)
52{
53 if (def_to_bigsmp)
54 dmi_bigsmp = 1;
55 else
56 dmi_check_system(bigsmp_dmi_table);
57 return dmi_bigsmp;
58}
59
60struct genapic apic_bigsmp = APIC_INIT("bigsmp", probe_bigsmp);
diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c
deleted file mode 100644
index e63a4a76d8cd..000000000000
--- a/arch/x86/mach-generic/default.c
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * Default generic APIC driver. This handles up to 8 CPUs.
3 */
4#define APIC_DEFINITION 1
5#include <linux/threads.h>
6#include <linux/cpumask.h>
7#include <asm/mpspec.h>
8#include <asm/mach-default/mach_apicdef.h>
9#include <asm/genapic.h>
10#include <asm/fixmap.h>
11#include <asm/apicdef.h>
12#include <linux/kernel.h>
13#include <linux/string.h>
14#include <linux/smp.h>
15#include <linux/init.h>
16#include <asm/mach-default/mach_apic.h>
17#include <asm/mach-default/mach_ipi.h>
18#include <asm/mach-default/mach_mpparse.h>
19#include <asm/mach-default/mach_wakecpu.h>
20
21/* should be called last. */
22static int probe_default(void)
23{
24 return 1;
25}
26
27struct genapic apic_default = APIC_INIT("default", probe_default);
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c
deleted file mode 100644
index c2ded1448024..000000000000
--- a/arch/x86/mach-generic/es7000.c
+++ /dev/null
@@ -1,103 +0,0 @@
1/*
2 * APIC driver for the Unisys ES7000 chipset.
3 */
4#define APIC_DEFINITION 1
5#include <linux/threads.h>
6#include <linux/cpumask.h>
7#include <asm/mpspec.h>
8#include <asm/genapic.h>
9#include <asm/fixmap.h>
10#include <asm/apicdef.h>
11#include <linux/kernel.h>
12#include <linux/string.h>
13#include <linux/init.h>
14#include <asm/es7000/apicdef.h>
15#include <linux/smp.h>
16#include <asm/es7000/apic.h>
17#include <asm/es7000/ipi.h>
18#include <asm/es7000/mpparse.h>
19#include <asm/mach-default/mach_wakecpu.h>
20
21void __init es7000_update_genapic_to_cluster(void)
22{
23 genapic->target_cpus = target_cpus_cluster;
24 genapic->int_delivery_mode = INT_DELIVERY_MODE_CLUSTER;
25 genapic->int_dest_mode = INT_DEST_MODE_CLUSTER;
26 genapic->no_balance_irq = NO_BALANCE_IRQ_CLUSTER;
27
28 genapic->init_apic_ldr = init_apic_ldr_cluster;
29
30 genapic->cpu_mask_to_apicid = cpu_mask_to_apicid_cluster;
31}
32
33static int probe_es7000(void)
34{
35 /* probed later in mptable/ACPI hooks */
36 return 0;
37}
38
39extern void es7000_sw_apic(void);
40static void __init enable_apic_mode(void)
41{
42 es7000_sw_apic();
43 return;
44}
45
46static __init int
47mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
48{
49 if (mpc->oemptr) {
50 struct mpc_oemtable *oem_table =
51 (struct mpc_oemtable *)mpc->oemptr;
52 if (!strncmp(oem, "UNISYS", 6))
53 return parse_unisys_oem((char *)oem_table);
54 }
55 return 0;
56}
57
58#ifdef CONFIG_ACPI
59/* Hook from generic ACPI tables.c */
60static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
61{
62 unsigned long oem_addr = 0;
63 int check_dsdt;
64 int ret = 0;
65
66 /* check dsdt at first to avoid clear fix_map for oem_addr */
67 check_dsdt = es7000_check_dsdt();
68
69 if (!find_unisys_acpi_oem_table(&oem_addr)) {
70 if (check_dsdt)
71 ret = parse_unisys_oem((char *)oem_addr);
72 else {
73 setup_unisys();
74 ret = 1;
75 }
76 /*
77 * we need to unmap it
78 */
79 unmap_unisys_acpi_oem_table(oem_addr);
80 }
81 return ret;
82}
83#else
84static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
85{
86 return 0;
87}
88#endif
89
90static void vector_allocation_domain(int cpu, cpumask_t *retmask)
91{
92 /* Careful. Some cpus do not strictly honor the set of cpus
93 * specified in the interrupt destination when using lowest
94 * priority interrupt delivery mode.
95 *
96 * In particular there was a hyperthreading cpu observed to
97 * deliver interrupts to the wrong hyperthread when only one
98 * hyperthread was specified in the interrupt destination.
99 */
100 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
101}
102
103struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c
deleted file mode 100644
index 3679e2255645..000000000000
--- a/arch/x86/mach-generic/numaq.c
+++ /dev/null
@@ -1,53 +0,0 @@
1/*
2 * APIC driver for the IBM NUMAQ chipset.
3 */
4#define APIC_DEFINITION 1
5#include <linux/threads.h>
6#include <linux/cpumask.h>
7#include <asm/mpspec.h>
8#include <asm/genapic.h>
9#include <asm/fixmap.h>
10#include <asm/apicdef.h>
11#include <linux/kernel.h>
12#include <linux/string.h>
13#include <linux/init.h>
14#include <asm/numaq/apicdef.h>
15#include <linux/smp.h>
16#include <asm/numaq/apic.h>
17#include <asm/numaq/ipi.h>
18#include <asm/numaq/mpparse.h>
19#include <asm/numaq/wakecpu.h>
20#include <asm/numaq.h>
21
22static int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
23{
24 numaq_mps_oem_check(mpc, oem, productid);
25 return found_numaq;
26}
27
28static int probe_numaq(void)
29{
30 /* already know from get_memcfg_numaq() */
31 return found_numaq;
32}
33
34/* Hook from generic ACPI tables.c */
35static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
36{
37 return 0;
38}
39
40static void vector_allocation_domain(int cpu, cpumask_t *retmask)
41{
42 /* Careful. Some cpus do not strictly honor the set of cpus
43 * specified in the interrupt destination when using lowest
44 * priority interrupt delivery mode.
45 *
46 * In particular there was a hyperthreading cpu observed to
47 * deliver interrupts to the wrong hyperthread when only one
48 * hyperthread was specified in the interrupt destination.
49 */
50 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
51}
52
53struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c
deleted file mode 100644
index 15a38daef1a8..000000000000
--- a/arch/x86/mach-generic/probe.c
+++ /dev/null
@@ -1,152 +0,0 @@
1/*
2 * Copyright 2003 Andi Kleen, SuSE Labs.
3 * Subject to the GNU Public License, v.2
4 *
5 * Generic x86 APIC driver probe layer.
6 */
7#include <linux/threads.h>
8#include <linux/cpumask.h>
9#include <linux/string.h>
10#include <linux/kernel.h>
11#include <linux/ctype.h>
12#include <linux/init.h>
13#include <linux/errno.h>
14#include <asm/fixmap.h>
15#include <asm/mpspec.h>
16#include <asm/apicdef.h>
17#include <asm/genapic.h>
18#include <asm/setup.h>
19
20extern struct genapic apic_numaq;
21extern struct genapic apic_summit;
22extern struct genapic apic_bigsmp;
23extern struct genapic apic_es7000;
24extern struct genapic apic_default;
25
26struct genapic *genapic = &apic_default;
27
28static struct genapic *apic_probe[] __initdata = {
29#ifdef CONFIG_X86_NUMAQ
30 &apic_numaq,
31#endif
32#ifdef CONFIG_X86_SUMMIT
33 &apic_summit,
34#endif
35#ifdef CONFIG_X86_BIGSMP
36 &apic_bigsmp,
37#endif
38#ifdef CONFIG_X86_ES7000
39 &apic_es7000,
40#endif
41 &apic_default, /* must be last */
42 NULL,
43};
44
45static int cmdline_apic __initdata;
46static int __init parse_apic(char *arg)
47{
48 int i;
49
50 if (!arg)
51 return -EINVAL;
52
53 for (i = 0; apic_probe[i]; i++) {
54 if (!strcmp(apic_probe[i]->name, arg)) {
55 genapic = apic_probe[i];
56 cmdline_apic = 1;
57 return 0;
58 }
59 }
60
61 if (x86_quirks->update_genapic)
62 x86_quirks->update_genapic();
63
64 /* Parsed again by __setup for debug/verbose */
65 return 0;
66}
67early_param("apic", parse_apic);
68
69void __init generic_bigsmp_probe(void)
70{
71#ifdef CONFIG_X86_BIGSMP
72 /*
73 * This routine is used to switch to bigsmp mode when
74 * - There is no apic= option specified by the user
75 * - generic_apic_probe() has chosen apic_default as the sub_arch
76 * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
77 */
78
79 if (!cmdline_apic && genapic == &apic_default) {
80 if (apic_bigsmp.probe()) {
81 genapic = &apic_bigsmp;
82 if (x86_quirks->update_genapic)
83 x86_quirks->update_genapic();
84 printk(KERN_INFO "Overriding APIC driver with %s\n",
85 genapic->name);
86 }
87 }
88#endif
89}
90
91void __init generic_apic_probe(void)
92{
93 if (!cmdline_apic) {
94 int i;
95 for (i = 0; apic_probe[i]; i++) {
96 if (apic_probe[i]->probe()) {
97 genapic = apic_probe[i];
98 break;
99 }
100 }
101 /* Not visible without early console */
102 if (!apic_probe[i])
103 panic("Didn't find an APIC driver");
104
105 if (x86_quirks->update_genapic)
106 x86_quirks->update_genapic();
107 }
108 printk(KERN_INFO "Using APIC driver %s\n", genapic->name);
109}
110
111/* These functions can switch the APIC even after the initial ->probe() */
112
113int __init mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
114{
115 int i;
116 for (i = 0; apic_probe[i]; ++i) {
117 if (apic_probe[i]->mps_oem_check(mpc, oem, productid)) {
118 if (!cmdline_apic) {
119 genapic = apic_probe[i];
120 if (x86_quirks->update_genapic)
121 x86_quirks->update_genapic();
122 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
123 genapic->name);
124 }
125 return 1;
126 }
127 }
128 return 0;
129}
130
131int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
132{
133 int i;
134 for (i = 0; apic_probe[i]; ++i) {
135 if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
136 if (!cmdline_apic) {
137 genapic = apic_probe[i];
138 if (x86_quirks->update_genapic)
139 x86_quirks->update_genapic();
140 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
141 genapic->name);
142 }
143 return 1;
144 }
145 }
146 return 0;
147}
148
149int hard_smp_processor_id(void)
150{
151 return genapic->get_apic_id(*(unsigned long *)(APIC_BASE+APIC_ID));
152}
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c
deleted file mode 100644
index 2821ffc188b5..000000000000
--- a/arch/x86/mach-generic/summit.c
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * APIC driver for the IBM "Summit" chipset.
3 */
4#define APIC_DEFINITION 1
5#include <linux/threads.h>
6#include <linux/cpumask.h>
7#include <asm/mpspec.h>
8#include <asm/genapic.h>
9#include <asm/fixmap.h>
10#include <asm/apicdef.h>
11#include <linux/kernel.h>
12#include <linux/string.h>
13#include <linux/init.h>
14#include <asm/summit/apicdef.h>
15#include <linux/smp.h>
16#include <asm/summit/apic.h>
17#include <asm/summit/ipi.h>
18#include <asm/summit/mpparse.h>
19#include <asm/mach-default/mach_wakecpu.h>
20
21static int probe_summit(void)
22{
23 /* probed later in mptable/ACPI hooks */
24 return 0;
25}
26
27static void vector_allocation_domain(int cpu, cpumask_t *retmask)
28{
29 /* Careful. Some cpus do not strictly honor the set of cpus
30 * specified in the interrupt destination when using lowest
31 * priority interrupt delivery mode.
32 *
33 * In particular there was a hyperthreading cpu observed to
34 * deliver interrupts to the wrong hyperthread when only one
35 * hyperthread was specified in the interrupt destination.
36 */
37 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
38}
39
40struct genapic apic_summit = APIC_INIT("summit", probe_summit);
diff --git a/arch/x86/mach-rdc321x/Makefile b/arch/x86/mach-rdc321x/Makefile
deleted file mode 100644
index 8325b4ca431c..000000000000
--- a/arch/x86/mach-rdc321x/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1#
2# Makefile for the RDC321x specific parts of the kernel
3#
4obj-$(CONFIG_X86_RDC321X) := gpio.o platform.o
5
diff --git a/arch/x86/mach-rdc321x/gpio.c b/arch/x86/mach-rdc321x/gpio.c
deleted file mode 100644
index 247f33d3a407..000000000000
--- a/arch/x86/mach-rdc321x/gpio.c
+++ /dev/null
@@ -1,194 +0,0 @@
1/*
2 * GPIO support for RDC SoC R3210/R8610
3 *
4 * Copyright (C) 2007, Florian Fainelli <florian@openwrt.org>
5 * Copyright (C) 2008, Volker Weiss <dev@tintuc.de>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 */
22
23
24#include <linux/spinlock.h>
25#include <linux/io.h>
26#include <linux/types.h>
27#include <linux/module.h>
28
29#include <asm/gpio.h>
30#include <asm/mach-rdc321x/rdc321x_defs.h>
31
32
33/* spin lock to protect our private copy of GPIO data register plus
34 the access to PCI conf registers. */
35static DEFINE_SPINLOCK(gpio_lock);
36
37/* copy of GPIO data registers */
38static u32 gpio_data_reg1;
39static u32 gpio_data_reg2;
40
41static u32 gpio_request_data[2];
42
43
44static inline void rdc321x_conf_write(unsigned addr, u32 value)
45{
46 outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
47 outl(value, RDC3210_CFGREG_DATA);
48}
49
50static inline void rdc321x_conf_or(unsigned addr, u32 value)
51{
52 outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
53 value |= inl(RDC3210_CFGREG_DATA);
54 outl(value, RDC3210_CFGREG_DATA);
55}
56
57static inline u32 rdc321x_conf_read(unsigned addr)
58{
59 outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
60
61 return inl(RDC3210_CFGREG_DATA);
62}
63
64/* configure pin as GPIO */
65static void rdc321x_configure_gpio(unsigned gpio)
66{
67 unsigned long flags;
68
69 spin_lock_irqsave(&gpio_lock, flags);
70 rdc321x_conf_or(gpio < 32
71 ? RDC321X_GPIO_CTRL_REG1 : RDC321X_GPIO_CTRL_REG2,
72 1 << (gpio & 0x1f));
73 spin_unlock_irqrestore(&gpio_lock, flags);
74}
75
76/* initially set up the 2 copies of the gpio data registers.
77 This function must be called by the platform setup code. */
78void __init rdc321x_gpio_setup()
79{
80 /* this might not be what others (BIOS, bootloader, etc.)
81 wrote to these registers before, but it's a good guess. Still
82 better than just using 0xffffffff. */
83
84 gpio_data_reg1 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG1);
85 gpio_data_reg2 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG2);
86}
87
88/* determine if the gpio number is valid */
89static inline int rdc321x_is_gpio(unsigned gpio)
90{
91 return gpio <= RDC321X_MAX_GPIO;
92}
93
94/* request GPIO */
95int rdc_gpio_request(unsigned gpio, const char *label)
96{
97 unsigned long flags;
98
99 if (!rdc321x_is_gpio(gpio))
100 return -EINVAL;
101
102 spin_lock_irqsave(&gpio_lock, flags);
103 if (gpio_request_data[(gpio & 0x20) ? 1 : 0] & (1 << (gpio & 0x1f)))
104 goto inuse;
105 gpio_request_data[(gpio & 0x20) ? 1 : 0] |= (1 << (gpio & 0x1f));
106 spin_unlock_irqrestore(&gpio_lock, flags);
107
108 return 0;
109inuse:
110 spin_unlock_irqrestore(&gpio_lock, flags);
111 return -EINVAL;
112}
113EXPORT_SYMBOL(rdc_gpio_request);
114
115/* release previously-claimed GPIO */
116void rdc_gpio_free(unsigned gpio)
117{
118 unsigned long flags;
119
120 if (!rdc321x_is_gpio(gpio))
121 return;
122
123 spin_lock_irqsave(&gpio_lock, flags);
124 gpio_request_data[(gpio & 0x20) ? 1 : 0] &= ~(1 << (gpio & 0x1f));
125 spin_unlock_irqrestore(&gpio_lock, flags);
126}
127EXPORT_SYMBOL(rdc_gpio_free);
128
129/* read GPIO pin */
130int rdc_gpio_get_value(unsigned gpio)
131{
132 u32 reg;
133 unsigned long flags;
134
135 spin_lock_irqsave(&gpio_lock, flags);
136 reg = rdc321x_conf_read(gpio < 32
137 ? RDC321X_GPIO_DATA_REG1 : RDC321X_GPIO_DATA_REG2);
138 spin_unlock_irqrestore(&gpio_lock, flags);
139
140 return (1 << (gpio & 0x1f)) & reg ? 1 : 0;
141}
142EXPORT_SYMBOL(rdc_gpio_get_value);
143
144/* set GPIO pin to value */
145void rdc_gpio_set_value(unsigned gpio, int value)
146{
147 unsigned long flags;
148 u32 reg;
149
150 reg = 1 << (gpio & 0x1f);
151 if (gpio < 32) {
152 spin_lock_irqsave(&gpio_lock, flags);
153 if (value)
154 gpio_data_reg1 |= reg;
155 else
156 gpio_data_reg1 &= ~reg;
157 rdc321x_conf_write(RDC321X_GPIO_DATA_REG1, gpio_data_reg1);
158 spin_unlock_irqrestore(&gpio_lock, flags);
159 } else {
160 spin_lock_irqsave(&gpio_lock, flags);
161 if (value)
162 gpio_data_reg2 |= reg;
163 else
164 gpio_data_reg2 &= ~reg;
165 rdc321x_conf_write(RDC321X_GPIO_DATA_REG2, gpio_data_reg2);
166 spin_unlock_irqrestore(&gpio_lock, flags);
167 }
168}
169EXPORT_SYMBOL(rdc_gpio_set_value);
170
171/* configure GPIO pin as input */
172int rdc_gpio_direction_input(unsigned gpio)
173{
174 if (!rdc321x_is_gpio(gpio))
175 return -EINVAL;
176
177 rdc321x_configure_gpio(gpio);
178
179 return 0;
180}
181EXPORT_SYMBOL(rdc_gpio_direction_input);
182
183/* configure GPIO pin as output and set value */
184int rdc_gpio_direction_output(unsigned gpio, int value)
185{
186 if (!rdc321x_is_gpio(gpio))
187 return -EINVAL;
188
189 gpio_set_value(gpio, value);
190 rdc321x_configure_gpio(gpio);
191
192 return 0;
193}
194EXPORT_SYMBOL(rdc_gpio_direction_output);
diff --git a/arch/x86/mach-rdc321x/platform.c b/arch/x86/mach-rdc321x/platform.c
deleted file mode 100644
index 4f4e50c3ad3b..000000000000
--- a/arch/x86/mach-rdc321x/platform.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Generic RDC321x platform devices
- *
- * Copyright (C) 2007 Florian Fainelli <florian@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA  02110-1301, USA.
- *
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-
-#include <asm/gpio.h>
-
-/* LEDS */
-static struct gpio_led default_leds[] = {
-	{ .name = "rdc:dmz", .gpio = 1, },
-};
-
-static struct gpio_led_platform_data rdc321x_led_data = {
-	.num_leds = ARRAY_SIZE(default_leds),
-	.leds = default_leds,
-};
-
-static struct platform_device rdc321x_leds = {
-	.name = "leds-gpio",
-	.id = -1,
-	.dev = {
-		.platform_data = &rdc321x_led_data,
-	}
-};
-
-/* Watchdog */
-static struct platform_device rdc321x_wdt = {
-	.name = "rdc321x-wdt",
-	.id = -1,
-	.num_resources = 0,
-};
-
-static struct platform_device *rdc321x_devs[] = {
-	&rdc321x_leds,
-	&rdc321x_wdt
-};
-
-static int __init rdc_board_setup(void)
-{
-	rdc321x_gpio_setup();
-
-	return platform_add_devices(rdc321x_devs, ARRAY_SIZE(rdc321x_devs));
-}
-
-arch_initcall(rdc_board_setup);
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index d914a7996a66..66b7eb57d8e4 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -9,6 +9,7 @@
 #include <asm/e820.h>
 #include <asm/io.h>
 #include <asm/setup.h>
+#include <asm/cpu.h>
 
 void __init pre_intr_init_hook(void)
 {
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 7ffcdeec4631..6f5a38c7f900 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -400,7 +400,7 @@ void __init find_smp_config(void)
 					    VOYAGER_SUS_IN_CONTROL_PORT);
 
 	current_thread_info()->cpu = boot_cpu_id;
-	x86_write_percpu(cpu_number, boot_cpu_id);
+	percpu_write(cpu_number, boot_cpu_id);
 }
 
 /*
@@ -528,7 +528,6 @@ static void __init do_boot_cpu(__u8 cpu)
 	/* init_tasks (in sched.c) is indexed logically */
 	stack_start.sp = (void *)idle->thread.sp;
 
-	init_gdt(cpu);
 	per_cpu(current_task, cpu) = idle;
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	irq_ctx_init(cpu);
@@ -1745,14 +1744,13 @@ static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
 
 static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 {
-	init_gdt(smp_processor_id());
-	switch_to_new_gdt();
+	int cpu = smp_processor_id();
+	switch_to_new_gdt(cpu);
 
 	cpu_online_map = cpumask_of_cpu(smp_processor_id());
 	cpu_callout_map = cpumask_of_cpu(smp_processor_id());
 	cpu_callin_map = CPU_MASK_NONE;
 	cpu_present_map = cpumask_of_cpu(smp_processor_id());
-
 }
 
 static int __cpuinit voyager_cpu_up(unsigned int cpu)
@@ -1779,7 +1777,6 @@ static void __init voyager_smp_cpus_done(unsigned int max_cpus)
 void __init smp_setup_processor_id(void)
 {
 	current_thread_info()->cpu = hard_smp_processor_id();
-	x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
 
 static void voyager_send_call_func(const struct cpumask *callmask)
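Context for the x86_write_percpu() -> percpu_write() conversions above: both store into a per-CPU variable; the new spelling is the generic accessor. A rough user-space model of the idea (this is not the kernel's implementation — the real one uses segment-relative addressing):

#include <stdio.h>

#define NR_CPUS 4

/* one slot per CPU stands in for the real per-CPU area */
static int cpu_number[NR_CPUS];

/* assume some notion of "the CPU we run on"; hardcoded here */
static int this_cpu = 0;

#define percpu_write(var, val)	((var)[this_cpu] = (val))
#define percpu_read(var)	((var)[this_cpu])

int main(void)
{
	percpu_write(cpu_number, 0);	/* boot CPU publishes its id */
	printf("cpu_number = %d\n", percpu_read(cpu_number));
	return 0;
}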
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
index 420b3b6e3915..6ef5e99380f9 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -150,11 +150,9 @@ static long pm_address(u_char FPU_modrm, u_char segment,
 #endif /* PARANOID */
 
 	switch (segment) {
-		/* gs isn't used by the kernel, so it still has its
-		   user-space value. */
 	case PREFIX_GS_ - 1:
-		/* N.B. - movl %seg, mem is a 2 byte write regardless of prefix */
-		savesegment(gs, addr->selector);
+		/* user gs handling can be lazy, use special accessors */
+		addr->selector = get_user_gs(FPU_info->regs);
 		break;
 	default:
 		addr->selector = PM_REG_(segment);
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index d8cc96a2738f..2b938a384910 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,6 +1,8 @@
 obj-y	:=	init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
 		pat.o pgtable.o gup.o
 
+obj-$(CONFIG_SMP)		+= tlb.o
+
 obj-$(CONFIG_X86_32)		+= pgtable_32.o iomap_32.o
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 7e8db53528a7..61b41ca3b5a2 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -23,6 +23,12 @@ int fixup_exception(struct pt_regs *regs)
 
 	fixup = search_exception_tables(regs->ip);
 	if (fixup) {
+		/* If fixup is less than 16, it means uaccess error */
+		if (fixup->fixup < 16) {
+			current_thread_info()->uaccess_err = -EFAULT;
+			regs->ip += fixup->fixup;
+			return 1;
+		}
 		regs->ip = fixup->fixup;
 		return 1;
 	}
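The new branch overloads small fixup values: a fixup below 16 is read as a short forward skip (regs->ip += fixup) plus a "fault happened" flag in thread_info, rather than as the address of an out-of-line landing pad. A stand-alone model of that dispatch (struct and variable names here are illustrative, not the kernel's):

#include <stdio.h>

struct fake_regs  { unsigned long ip; };
struct fake_entry { unsigned long fixup; };

static int uaccess_err;

static int fixup_exception_model(struct fake_regs *regs,
				 struct fake_entry *e)
{
	if (e->fixup < 16) {
		/* uaccess style: flag the error, skip a few bytes */
		uaccess_err = -14;	/* stands in for -EFAULT */
		regs->ip += e->fixup;
		return 1;
	}
	/* classic style: jump to the recorded landing address */
	regs->ip = e->fixup;
	return 1;
}

int main(void)
{
	struct fake_regs r = { .ip = 0x1000 };
	struct fake_entry e = { .fixup = 3 };

	fixup_exception_model(&r, &e);
	printf("ip=0x%lx uaccess_err=%d\n", r.ip, uaccess_err);
	return 0;
}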
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c76ef1d701c9..2a9ea3aee493 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -26,6 +26,7 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
+#include <linux/magic.h>
 
 #include <asm/system.h>
 #include <asm/desc.h>
@@ -91,8 +92,8 @@ static inline int notify_page_fault(struct pt_regs *regs)
  *
  * Opcode checker based on code by Richard Brunner
  */
-static int is_prefetch(struct pt_regs *regs, unsigned long addr,
-		       unsigned long error_code)
+static int is_prefetch(struct pt_regs *regs, unsigned long error_code,
+			unsigned long addr)
 {
 	unsigned char *instr;
 	int scan_more = 1;
@@ -409,17 +410,16 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 }
 
 #ifdef CONFIG_X86_64
-static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
-				 unsigned long error_code)
+static noinline void pgtable_bad(struct pt_regs *regs,
+			unsigned long error_code, unsigned long address)
 {
 	unsigned long flags = oops_begin();
 	int sig = SIGKILL;
-	struct task_struct *tsk;
+	struct task_struct *tsk = current;
 
 	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
-	       current->comm, address);
+	       tsk->comm, address);
 	dump_pagetable(address);
-	tsk = current;
 	tsk->thread.cr2 = address;
 	tsk->thread.trap_no = 14;
 	tsk->thread.error_code = error_code;
@@ -429,6 +429,196 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
 }
 #endif
 
+static noinline void no_context(struct pt_regs *regs,
+			unsigned long error_code, unsigned long address)
+{
+	struct task_struct *tsk = current;
+	unsigned long *stackend;
+
+#ifdef CONFIG_X86_64
+	unsigned long flags;
+	int sig;
+#endif
+
+	/* Are we prepared to handle this kernel fault? */
+	if (fixup_exception(regs))
+		return;
+
+	/*
+	 * X86_32
+	 * Valid to do another page fault here, because if this fault
+	 * had been triggered by is_prefetch fixup_exception would have
+	 * handled it.
+	 *
+	 * X86_64
+	 * Hall of shame of CPU/BIOS bugs.
+	 */
+	if (is_prefetch(regs, error_code, address))
+		return;
+
+	if (is_errata93(regs, address))
+		return;
+
+	/*
+	 * Oops. The kernel tried to access some bad page. We'll have to
+	 * terminate things with extreme prejudice.
+	 */
+#ifdef CONFIG_X86_32
+	bust_spinlocks(1);
+#else
+	flags = oops_begin();
+#endif
+
+	show_fault_oops(regs, error_code, address);
+
+	stackend = end_of_stack(tsk);
+	if (*stackend != STACK_END_MAGIC)
+		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
+
+	tsk->thread.cr2 = address;
+	tsk->thread.trap_no = 14;
+	tsk->thread.error_code = error_code;
+
+#ifdef CONFIG_X86_32
+	die("Oops", regs, error_code);
+	bust_spinlocks(0);
+	do_exit(SIGKILL);
+#else
+	sig = SIGKILL;
+	if (__die("Oops", regs, error_code))
+		sig = 0;
+	/* Executive summary in case the body of the oops scrolled away */
+	printk(KERN_EMERG "CR2: %016lx\n", address);
+	oops_end(flags, regs, sig);
+#endif
+}
+
+static void __bad_area_nosemaphore(struct pt_regs *regs,
+			unsigned long error_code, unsigned long address,
+			int si_code)
+{
+	struct task_struct *tsk = current;
+
+	/* User mode accesses just cause a SIGSEGV */
+	if (error_code & PF_USER) {
+		/*
+		 * It's possible to have interrupts off here.
+		 */
+		local_irq_enable();
+
+		/*
+		 * Valid to do another page fault here because this one came
+		 * from user space.
+		 */
+		if (is_prefetch(regs, error_code, address))
+			return;
+
+		if (is_errata100(regs, address))
+			return;
+
+		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
+		    printk_ratelimit()) {
+			printk(
+			"%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
+			task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
+			tsk->comm, task_pid_nr(tsk), address,
+			(void *) regs->ip, (void *) regs->sp, error_code);
+			print_vma_addr(" in ", regs->ip);
+			printk("\n");
+		}
+
+		tsk->thread.cr2 = address;
+		/* Kernel addresses are always protection faults */
+		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+		tsk->thread.trap_no = 14;
+		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
+		return;
+	}
+
+	if (is_f00f_bug(regs, address))
+		return;
+
+	no_context(regs, error_code, address);
+}
+
+static noinline void bad_area_nosemaphore(struct pt_regs *regs,
+			unsigned long error_code, unsigned long address)
+{
+	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
+}
+
+static void __bad_area(struct pt_regs *regs,
+			unsigned long error_code, unsigned long address,
+			int si_code)
+{
+	struct mm_struct *mm = current->mm;
+
+	/*
+	 * Something tried to access memory that isn't in our memory map..
+	 * Fix it, but check if it's kernel or user first..
+	 */
+	up_read(&mm->mmap_sem);
+
+	__bad_area_nosemaphore(regs, error_code, address, si_code);
+}
+
+static noinline void bad_area(struct pt_regs *regs,
+			unsigned long error_code, unsigned long address)
+{
+	__bad_area(regs, error_code, address, SEGV_MAPERR);
+}
+
+static noinline void bad_area_access_error(struct pt_regs *regs,
+			unsigned long error_code, unsigned long address)
+{
+	__bad_area(regs, error_code, address, SEGV_ACCERR);
+}
+
+/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
+static void out_of_memory(struct pt_regs *regs,
+			unsigned long error_code, unsigned long address)
+{
+	/*
+	 * We ran out of memory, call the OOM killer, and return the userspace
+	 * (which will retry the fault, or kill us if we got oom-killed).
+	 */
+	up_read(&current->mm->mmap_sem);
+	pagefault_out_of_memory();
+}
+
+static void do_sigbus(struct pt_regs *regs,
+			unsigned long error_code, unsigned long address)
+{
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->mm;
+
+	up_read(&mm->mmap_sem);
+
+	/* Kernel mode? Handle exceptions or die */
+	if (!(error_code & PF_USER))
+		no_context(regs, error_code, address);
+#ifdef CONFIG_X86_32
+	/* User space => ok to do another page fault */
+	if (is_prefetch(regs, error_code, address))
+		return;
+#endif
+	tsk->thread.cr2 = address;
+	tsk->thread.error_code = error_code;
+	tsk->thread.trap_no = 14;
+	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
+}
+
+static noinline void mm_fault_error(struct pt_regs *regs,
+		unsigned long error_code, unsigned long address, unsigned int fault)
+{
+	if (fault & VM_FAULT_OOM)
+		out_of_memory(regs, error_code, address);
+	else if (fault & VM_FAULT_SIGBUS)
+		do_sigbus(regs, error_code, address);
+	else
+		BUG();
+}
+
 static int spurious_fault_check(unsigned long error_code, pte_t *pte)
 {
 	if ((error_code & PF_WRITE) && !pte_write(*pte))
@@ -448,8 +638,8 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
  * There are no security implications to leaving a stale TLB when
  * increasing the permissions on a page.
  */
-static int spurious_fault(unsigned long address,
-			  unsigned long error_code)
+static noinline int spurious_fault(unsigned long error_code,
+				unsigned long address)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -494,7 +684,7 @@ static int spurious_fault(unsigned long address,
  *
  * This assumes no large pages in there.
  */
-static int vmalloc_fault(unsigned long address)
+static noinline int vmalloc_fault(unsigned long address)
 {
 #ifdef CONFIG_X86_32
 	unsigned long pgd_paddr;
@@ -573,6 +763,25 @@ static int vmalloc_fault(unsigned long address)
 
 int show_unhandled_signals = 1;
 
+static inline int access_error(unsigned long error_code, int write,
+				struct vm_area_struct *vma)
+{
+	if (write) {
+		/* write, present and write, not present */
+		if (unlikely(!(vma->vm_flags & VM_WRITE)))
+			return 1;
+	} else if (unlikely(error_code & PF_PROT)) {
+		/* read, present */
+		return 1;
+	} else {
+		/* read, not present */
+		if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
+			return 1;
+	}
+
+	return 0;
+}
+
 /*
  * This routine handles page faults. It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -583,16 +792,12 @@ asmlinkage
 #endif
 void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
+	unsigned long address;
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
-	unsigned long address;
-	int write, si_code;
+	int write;
 	int fault;
-#ifdef CONFIG_X86_64
-	unsigned long flags;
-	int sig;
-#endif
 
 	tsk = current;
 	mm = tsk->mm;
@@ -601,8 +806,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	/* get the address */
 	address = read_cr2();
 
-	si_code = SEGV_MAPERR;
-
 	if (unlikely(kmmio_fault(regs, address)))
 		return;
 
@@ -629,7 +832,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 			return;
 
 		/* Can handle a stale RO->RW TLB */
-		if (spurious_fault(address, error_code))
+		if (spurious_fault(error_code, address))
 			return;
 
 		/* kprobes don't want to hook the spurious faults. */
@@ -639,13 +842,12 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		 * Don't take the mm semaphore here. If we fixup a prefetch
 		 * fault we could otherwise deadlock.
 		 */
-		goto bad_area_nosemaphore;
+		bad_area_nosemaphore(regs, error_code, address);
+		return;
 	}
 
-	/* kprobes don't want to hook the spurious faults. */
-	if (notify_page_fault(regs))
+	if (unlikely(notify_page_fault(regs)))
 		return;
-
 	/*
 	 * It's safe to allow irq's after cr2 has been saved and the
 	 * vmalloc fault has been handled.
@@ -661,15 +863,17 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
 #ifdef CONFIG_X86_64
 	if (unlikely(error_code & PF_RSVD))
-		pgtable_bad(address, regs, error_code);
+		pgtable_bad(regs, error_code, address);
 #endif
 
 	/*
 	 * If we're in an interrupt, have no user context or are running in an
 	 * atomic region then we must not take the fault.
 	 */
-	if (unlikely(in_atomic() || !mm))
-		goto bad_area_nosemaphore;
+	if (unlikely(in_atomic() || !mm)) {
+		bad_area_nosemaphore(regs, error_code, address);
+		return;
+	}
 
 	/*
 	 * When running in the kernel we expect faults to occur only to
@@ -687,20 +891,26 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 * source. If this is invalid we can skip the address space check,
 	 * thus avoiding the deadlock.
 	 */
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
 		if ((error_code & PF_USER) == 0 &&
-		    !search_exception_tables(regs->ip))
-			goto bad_area_nosemaphore;
+		    !search_exception_tables(regs->ip)) {
+			bad_area_nosemaphore(regs, error_code, address);
+			return;
+		}
 		down_read(&mm->mmap_sem);
 	}
 
 	vma = find_vma(mm, address);
-	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
+	if (unlikely(!vma)) {
+		bad_area(regs, error_code, address);
+		return;
+	}
+	if (likely(vma->vm_start <= address))
 		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
+	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
+		bad_area(regs, error_code, address);
+		return;
+	}
 	if (error_code & PF_USER) {
 		/*
 		 * Accessing the stack below %sp is always a bug.
@@ -708,31 +918,25 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		 * and pusha to work. ("enter $65535,$31" pushes
 		 * 32 pointers and then decrements %sp by 65535.)
 		 */
-		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
-			goto bad_area;
+		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
+			bad_area(regs, error_code, address);
+			return;
+		}
 	}
-	if (expand_stack(vma, address))
-		goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
+	if (unlikely(expand_stack(vma, address))) {
+		bad_area(regs, error_code, address);
+		return;
+	}
+
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
 good_area:
-	si_code = SEGV_ACCERR;
-	write = 0;
-	switch (error_code & (PF_PROT|PF_WRITE)) {
-	default:	/* 3: write, present */
-		/* fall through */
-	case PF_WRITE:		/* write, not present */
-		if (!(vma->vm_flags & VM_WRITE))
-			goto bad_area;
-		write++;
-		break;
-	case PF_PROT:		/* read, present */
-		goto bad_area;
-	case 0:			/* read, not present */
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-			goto bad_area;
+	write = error_code & PF_WRITE;
+	if (unlikely(access_error(error_code, write, vma))) {
+		bad_area_access_error(regs, error_code, address);
+		return;
 	}
 
 	/*
@@ -742,11 +946,8 @@ good_area:
 	 */
 	fault = handle_mm_fault(mm, vma, address, write);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
-		if (fault & VM_FAULT_OOM)
-			goto out_of_memory;
-		else if (fault & VM_FAULT_SIGBUS)
-			goto do_sigbus;
-		BUG();
+		mm_fault_error(regs, error_code, address, fault);
+		return;
 	}
 	if (fault & VM_FAULT_MAJOR)
 		tsk->maj_flt++;
@@ -764,128 +965,6 @@ good_area:
 	}
 #endif
 	up_read(&mm->mmap_sem);
-	return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
-	up_read(&mm->mmap_sem);
-
-bad_area_nosemaphore:
-	/* User mode accesses just cause a SIGSEGV */
-	if (error_code & PF_USER) {
-		/*
-		 * It's possible to have interrupts off here.
-		 */
-		local_irq_enable();
-
-		/*
-		 * Valid to do another page fault here because this one came
-		 * from user space.
-		 */
-		if (is_prefetch(regs, address, error_code))
-			return;
-
-		if (is_errata100(regs, address))
-			return;
-
-		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
-		    printk_ratelimit()) {
-			printk(
-			"%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
-			task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
-			tsk->comm, task_pid_nr(tsk), address,
-			(void *) regs->ip, (void *) regs->sp, error_code);
-			print_vma_addr(" in ", regs->ip);
-			printk("\n");
-		}
-
-		tsk->thread.cr2 = address;
-		/* Kernel addresses are always protection faults */
-		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
-		tsk->thread.trap_no = 14;
-		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
-		return;
-	}
-
-	if (is_f00f_bug(regs, address))
-		return;
-
-no_context:
-	/* Are we prepared to handle this kernel fault? */
-	if (fixup_exception(regs))
-		return;
-
-	/*
-	 * X86_32
-	 * Valid to do another page fault here, because if this fault
-	 * had been triggered by is_prefetch fixup_exception would have
-	 * handled it.
-	 *
-	 * X86_64
-	 * Hall of shame of CPU/BIOS bugs.
-	 */
-	if (is_prefetch(regs, address, error_code))
-		return;
-
-	if (is_errata93(regs, address))
-		return;
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
-#ifdef CONFIG_X86_32
-	bust_spinlocks(1);
-#else
-	flags = oops_begin();
-#endif
-
-	show_fault_oops(regs, error_code, address);
-
-	tsk->thread.cr2 = address;
-	tsk->thread.trap_no = 14;
-	tsk->thread.error_code = error_code;
-
-#ifdef CONFIG_X86_32
-	die("Oops", regs, error_code);
-	bust_spinlocks(0);
-	do_exit(SIGKILL);
-#else
-	sig = SIGKILL;
-	if (__die("Oops", regs, error_code))
-		sig = 0;
-	/* Executive summary in case the body of the oops scrolled away */
-	printk(KERN_EMERG "CR2: %016lx\n", address);
-	oops_end(flags, regs, sig);
-#endif
-
-out_of_memory:
-	/*
-	 * We ran out of memory, call the OOM killer, and return the userspace
-	 * (which will retry the fault, or kill us if we got oom-killed).
-	 */
-	up_read(&mm->mmap_sem);
-	pagefault_out_of_memory();
-	return;
-
-do_sigbus:
-	up_read(&mm->mmap_sem);
-
-	/* Kernel mode? Handle exceptions or die */
-	if (!(error_code & PF_USER))
-		goto no_context;
-#ifdef CONFIG_X86_32
-	/* User space => ok to do another page fault */
-	if (is_prefetch(regs, address, error_code))
-		return;
-#endif
-	tsk->thread.cr2 = address;
-	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = 14;
-	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
 }
 
 DEFINE_SPINLOCK(pgd_lock);
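For reference while reading the error_code tests in fault.c: the page-fault error code is pushed by the hardware, and the PF_* names are bit masks over it (bit values per the x86 architecture manuals). A small stand-alone decoder:

#include <stdio.h>

#define PF_PROT		(1 << 0)	/* 0: not-present page, 1: protection fault */
#define PF_WRITE	(1 << 1)	/* 0: read access, 1: write access */
#define PF_USER		(1 << 2)	/* 0: kernel mode, 1: user mode */
#define PF_RSVD		(1 << 3)	/* reserved bit set in a paging entry */
#define PF_INSTR	(1 << 4)	/* fault on an instruction fetch */

static void decode_pf(unsigned long error_code)
{
	printf("%s %s in %s mode%s%s\n",
	       error_code & PF_WRITE ? "write" : "read",
	       error_code & PF_PROT ? "protection fault" : "on not-present page",
	       error_code & PF_USER ? "user" : "kernel",
	       error_code & PF_RSVD ? ", reserved bit set" : "",
	       error_code & PF_INSTR ? ", instruction fetch" : "");
}

int main(void)
{
	decode_pf(PF_USER | PF_WRITE);	/* user write to an unmapped page */
	decode_pf(PF_PROT);		/* kernel read hitting a protection fault */
	return 0;
}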
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 2cef05074413..06708ee94aa4 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -49,7 +49,6 @@
 #include <asm/paravirt.h>
 #include <asm/setup.h>
 #include <asm/cacheflush.h>
-#include <asm/smp.h>
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
@@ -675,75 +674,97 @@ static int __init parse_highmem(char *arg)
 }
 early_param("highmem", parse_highmem);
 
+#define MSG_HIGHMEM_TOO_BIG \
+	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"
+
+#define MSG_LOWMEM_TOO_SMALL \
+	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
 /*
- * Determine low and high memory ranges:
+ * All of RAM fits into lowmem - but if user wants highmem
+ * artificially via the highmem=x boot parameter then create
+ * it:
  */
-void __init find_low_pfn_range(void)
+void __init lowmem_pfn_init(void)
 {
-	/* it could update max_pfn */
-
 	/* max_low_pfn is 0, we already have early_res support */
-
 	max_low_pfn = max_pfn;
-	if (max_low_pfn > MAXMEM_PFN) {
-		if (highmem_pages == -1)
-			highmem_pages = max_pfn - MAXMEM_PFN;
-		if (highmem_pages + MAXMEM_PFN < max_pfn)
-			max_pfn = MAXMEM_PFN + highmem_pages;
-		if (highmem_pages + MAXMEM_PFN > max_pfn) {
-			printk(KERN_WARNING "only %luMB highmem pages "
-				"available, ignoring highmem size of %uMB.\n",
-				pages_to_mb(max_pfn - MAXMEM_PFN),
+
+	if (highmem_pages == -1)
+		highmem_pages = 0;
+#ifdef CONFIG_HIGHMEM
+	if (highmem_pages >= max_pfn) {
+		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
+			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
+		highmem_pages = 0;
+	}
+	if (highmem_pages) {
+		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
+			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
 				pages_to_mb(highmem_pages));
 			highmem_pages = 0;
 		}
-		max_low_pfn = MAXMEM_PFN;
+		max_low_pfn -= highmem_pages;
+	}
+#else
+	if (highmem_pages)
+		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
+#endif
+}
+
+#define MSG_HIGHMEM_TOO_SMALL \
+	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"
+
+#define MSG_HIGHMEM_TRIMMED \
+	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
+/*
+ * We have more RAM than fits into lowmem - we try to put it into
+ * highmem, also taking the highmem=x boot parameter into account:
+ */
+void __init highmem_pfn_init(void)
+{
+	max_low_pfn = MAXMEM_PFN;
+
+	if (highmem_pages == -1)
+		highmem_pages = max_pfn - MAXMEM_PFN;
+
+	if (highmem_pages + MAXMEM_PFN < max_pfn)
+		max_pfn = MAXMEM_PFN + highmem_pages;
+
+	if (highmem_pages + MAXMEM_PFN > max_pfn) {
+		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
+			pages_to_mb(max_pfn - MAXMEM_PFN),
+			pages_to_mb(highmem_pages));
+		highmem_pages = 0;
+	}
 #ifndef CONFIG_HIGHMEM
 	/* Maximum memory usable is what is directly addressable */
-	printk(KERN_WARNING "Warning only %ldMB will be used.\n",
-			MAXMEM>>20);
-	if (max_pfn > MAX_NONPAE_PFN)
-		printk(KERN_WARNING
-			"Use a HIGHMEM64G enabled kernel.\n");
-	else
-		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
-	max_pfn = MAXMEM_PFN;
+	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
+	if (max_pfn > MAX_NONPAE_PFN)
+		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
+	else
+		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
+	max_pfn = MAXMEM_PFN;
 #else /* !CONFIG_HIGHMEM */
 #ifndef CONFIG_HIGHMEM64G
 	if (max_pfn > MAX_NONPAE_PFN) {
 		max_pfn = MAX_NONPAE_PFN;
-		printk(KERN_WARNING "Warning only 4GB will be used."
-			"Use a HIGHMEM64G enabled kernel.\n");
-	}
+		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
+	}
 #endif /* !CONFIG_HIGHMEM64G */
 #endif /* !CONFIG_HIGHMEM */
-	} else {
-		if (highmem_pages == -1)
-			highmem_pages = 0;
-#ifdef CONFIG_HIGHMEM
-		if (highmem_pages >= max_pfn) {
-			printk(KERN_ERR "highmem size specified (%uMB) is "
-				"bigger than pages available (%luMB)!.\n",
-				pages_to_mb(highmem_pages),
-				pages_to_mb(max_pfn));
-			highmem_pages = 0;
-		}
-		if (highmem_pages) {
-			if (max_low_pfn - highmem_pages <
-			    64*1024*1024/PAGE_SIZE){
-				printk(KERN_ERR "highmem size %uMB results in "
-				"smaller than 64MB lowmem, ignoring it.\n"
-					, pages_to_mb(highmem_pages));
-				highmem_pages = 0;
-			}
-			max_low_pfn -= highmem_pages;
-		}
-#else
-		if (highmem_pages)
-			printk(KERN_ERR "ignoring highmem size on non-highmem"
-				" kernel!\n");
-#endif
-	}
+}
+
+/*
+ * Determine low and high memory ranges:
+ */
+void __init find_low_pfn_range(void)
+{
+	/* it could update max_pfn */
+
+	if (max_pfn <= MAXMEM_PFN)
+		lowmem_pfn_init();
+	else
+		highmem_pfn_init();
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
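A worked example of the split that find_low_pfn_range() now delegates, with made-up numbers: on a typical 32-bit configuration MAXMEM_PFN corresponds to roughly 896MB of lowmem, so a 2GB machine takes the highmem_pfn_init() path. A user-space sketch of the arithmetic (values are illustrative; the real ones come from Kconfig):

#include <stdio.h>

int main(void)
{
	unsigned long page_size  = 4096;
	unsigned long maxmem_pfn = (896UL << 20) / page_size;	/* ~896MB lowmem */
	unsigned long max_pfn    = (2048UL << 20) / page_size;	/* 2GB of RAM */

	/* the highmem_pages == -1 (no highmem= parameter) case */
	unsigned long max_low_pfn   = maxmem_pfn;
	unsigned long highmem_pages = max_pfn - maxmem_pfn;

	printf("lowmem: %lu pages (%lu MB), highmem: %lu pages (%lu MB)\n",
	       max_low_pfn, (max_low_pfn * page_size) >> 20,
	       highmem_pages, (highmem_pages * page_size) >> 20);
	return 0;
}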
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index af750ab973b6..1448bcb7f22f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -367,7 +367,7 @@ EXPORT_SYMBOL(ioremap_nocache);
  *
  * Must be freed with iounmap.
  */
-void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
 	if (pat_enabled)
 		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
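The prototype change matters on 32-bit PAE: unsigned long is 32 bits there, while resource_size_t follows the width of physical addresses, so MMIO regions above 4GB become representable. A rough sketch of the type difference (the CONFIG guard here is illustrative, not a promise about the exact kernel typedef chain):

#include <stdio.h>
#include <stdint.h>

/* resource_size_t tracks physical-address width, not the width of long */
#ifdef CONFIG_PHYS_ADDR_T_64BIT		/* e.g. a 32-bit kernel with PAE */
typedef uint64_t resource_size_t;
#else
typedef unsigned long resource_size_t;
#endif

int main(void)
{
	printf("unsigned long: %zu bytes, resource_size_t: %zu bytes\n",
	       sizeof(unsigned long), sizeof(resource_size_t));
	return 0;
}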
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 56fe7124fbec..165829600566 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -4,7 +4,7 @@
  * Based on code by Ingo Molnar and Andi Kleen, copyrighted
  * as follows:
  *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
+ * Copyright 2003-2009 Red Hat Inc.
  * All Rights Reserved.
  * Copyright 2005 Andi Kleen, SUSE Labs.
  * Copyright 2007 Jiri Kosina, SUSE Labs.
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 71a14f89f89e..deb1c1ab7868 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -20,6 +20,12 @@
 #include <asm/acpi.h>
 #include <asm/k8.h>
 
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+# define DBG(x...) printk(KERN_DEBUG x)
+#else
+# define DBG(x...)
+#endif
+
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 
@@ -33,6 +39,21 @@ int numa_off __initdata;
 static unsigned long __initdata nodemap_addr;
 static unsigned long __initdata nodemap_size;
 
+DEFINE_PER_CPU(int, node_number) = 0;
+EXPORT_PER_CPU_SYMBOL(node_number);
+
+/*
+ * Map cpu index to node index
+ */
+DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
+
+/*
+ * Which logical CPUs are on which nodes
+ */
+cpumask_t *node_to_cpumask_map;
+EXPORT_SYMBOL(node_to_cpumask_map);
+
 /*
  * Given a shift value, try to populate memnodemap[]
  * Returns :
@@ -640,3 +661,199 @@ void __init init_cpu_to_node(void)
 #endif
 
 
+/*
+ * Allocate node_to_cpumask_map based on number of available nodes
+ * Requires node_possible_map to be valid.
+ *
+ * Note: node_to_cpumask() is not valid until after this is done.
+ * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
+ */
+void __init setup_node_to_cpumask_map(void)
+{
+	unsigned int node, num = 0;
+	cpumask_t *map;
+
+	/* setup nr_node_ids if not done yet */
+	if (nr_node_ids == MAX_NUMNODES) {
+		for_each_node_mask(node, node_possible_map)
+			num = node;
+		nr_node_ids = num + 1;
+	}
+
+	/* allocate the map */
+	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
+	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);
+
+	pr_debug("Node to cpumask map at %p for %d nodes\n",
+		 map, nr_node_ids);
+
+	/* node_to_cpumask() will now work */
+	node_to_cpumask_map = map;
+}
+
+void __cpuinit numa_set_node(int cpu, int node)
+{
+	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
+
+	/* early setting, no percpu area yet */
+	if (cpu_to_node_map) {
+		cpu_to_node_map[cpu] = node;
+		return;
+	}
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
+		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
+		dump_stack();
+		return;
+	}
+#endif
+	per_cpu(x86_cpu_to_node_map, cpu) = node;
+
+	if (node != NUMA_NO_NODE)
+		per_cpu(node_number, cpu) = node;
+}
+
+void __cpuinit numa_clear_node(int cpu)
+{
+	numa_set_node(cpu, NUMA_NO_NODE);
+}
+
+#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+}
+
+#else /* CONFIG_DEBUG_PER_CPU_MAPS */
+
+/*
+ * --------- debug versions of the numa functions ---------
+ */
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+	int node = early_cpu_to_node(cpu);
+	cpumask_t *mask;
+	char buf[64];
+
+	if (node_to_cpumask_map == NULL) {
+		printk(KERN_ERR "node_to_cpumask_map NULL\n");
+		dump_stack();
+		return;
+	}
+
+	mask = &node_to_cpumask_map[node];
+	if (enable)
+		cpu_set(cpu, *mask);
+	else
+		cpu_clear(cpu, *mask);
+
+	cpulist_scnprintf(buf, sizeof(buf), mask);
+	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
+}
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+	numa_set_cpumask(cpu, 1);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+	numa_set_cpumask(cpu, 0);
+}
+
+int cpu_to_node(int cpu)
+{
+	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
+		printk(KERN_WARNING
+			"cpu_to_node(%d): usage too early!\n", cpu);
+		dump_stack();
+		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+	}
+	return per_cpu(x86_cpu_to_node_map, cpu);
+}
+EXPORT_SYMBOL(cpu_to_node);
+
+/*
+ * Same function as cpu_to_node() but used if called before the
+ * per_cpu areas are setup.
+ */
+int early_cpu_to_node(int cpu)
+{
+	if (early_per_cpu_ptr(x86_cpu_to_node_map))
+		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+
+	if (!cpu_possible(cpu)) {
+		printk(KERN_WARNING
+			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
+		dump_stack();
+		return NUMA_NO_NODE;
+	}
+	return per_cpu(x86_cpu_to_node_map, cpu);
+}
+
+
+/* empty cpumask */
+static const cpumask_t cpu_mask_none;
+
+/*
+ * Returns a pointer to the bitmask of CPUs on Node 'node'.
+ */
+const cpumask_t *cpumask_of_node(int node)
+{
+	if (node_to_cpumask_map == NULL) {
+		printk(KERN_WARNING
+			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
+			node);
+		dump_stack();
+		return (const cpumask_t *)&cpu_online_map;
+	}
+	if (node >= nr_node_ids) {
+		printk(KERN_WARNING
+			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
+			node, nr_node_ids);
+		dump_stack();
+		return &cpu_mask_none;
+	}
+	return &node_to_cpumask_map[node];
+}
+EXPORT_SYMBOL(cpumask_of_node);
+
+/*
+ * Returns a bitmask of CPUs on Node 'node'.
+ *
+ * Side note: this function creates the returned cpumask on the stack
+ * so with a high NR_CPUS count, excessive stack space is used.  The
+ * node_to_cpumask_ptr function should be used whenever possible.
+ */
+cpumask_t node_to_cpumask(int node)
+{
+	if (node_to_cpumask_map == NULL) {
+		printk(KERN_WARNING
+			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
+		dump_stack();
+		return cpu_online_map;
+	}
+	if (node >= nr_node_ids) {
+		printk(KERN_WARNING
+			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
+			node, nr_node_ids);
+		dump_stack();
+		return cpu_mask_none;
+	}
+	return node_to_cpumask_map[node];
+}
+EXPORT_SYMBOL(node_to_cpumask);
+
+/*
+ * --------- end of debug versions of the numa functions ---------
+ */
+
+#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
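The pattern worth taking away from numa_set_node()/early_cpu_to_node() above: before the per-CPU areas exist, lookups go through a static early array; afterwards they use the real per-CPU variable. A condensed user-space model of that switch-over (all names here are stand-ins):

#include <stdio.h>

#define NR_CPUS		4
#define NUMA_NO_NODE	(-1)

/* early boot: a plain array; the pointer is cleared once per-CPU areas exist */
static int early_map_storage[NR_CPUS] = { -1, -1, -1, -1 };
static int *early_map = early_map_storage;

static int percpu_map[NR_CPUS];	/* stands in for per_cpu(x86_cpu_to_node_map) */

static void set_node(int cpu, int node)
{
	if (early_map)			/* early setting, no percpu area yet */
		early_map[cpu] = node;
	else
		percpu_map[cpu] = node;
}

int main(void)
{
	set_node(0, 1);			/* lands in the early array */
	early_map = NULL;		/* per-CPU areas are now set up */
	set_node(1, 0);			/* lands in the per-CPU copy */
	printf("early: cpu0->%d, percpu: cpu1->%d\n",
	       early_map_storage[0], percpu_map[1]);
	return 0;
}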
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 7b61036427df..9127e31c7268 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -30,7 +30,7 @@
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_enabled = 1;
 
-void __cpuinit pat_disable(char *reason)
+void __cpuinit pat_disable(const char *reason)
 {
 	pat_enabled = 0;
 	printk(KERN_INFO "%s\n", reason);
@@ -42,6 +42,11 @@ static int __init nopat(char *str)
 	return 0;
 }
 early_param("nopat", nopat);
+#else
+static inline void pat_disable(const char *reason)
+{
+	(void)reason;
+}
 #endif
 
 
@@ -78,16 +83,20 @@ void pat_init(void)
 	if (!pat_enabled)
 		return;
 
-	/* Paranoia check. */
-	if (!cpu_has_pat && boot_pat_state) {
-		/*
-		 * If this happens we are on a secondary CPU, but
-		 * switched to PAT on the boot CPU. We have no way to
-		 * undo PAT.
-		 */
-		printk(KERN_ERR "PAT enabled, "
-		       "but not supported by secondary CPU\n");
-		BUG();
+	if (!cpu_has_pat) {
+		if (!boot_pat_state) {
+			pat_disable("PAT not supported by CPU.");
+			return;
+		} else {
+			/*
+			 * If this happens we are on a secondary CPU, but
+			 * switched to PAT on the boot CPU. We have no way to
+			 * undo PAT.
+			 */
+			printk(KERN_ERR "PAT enabled, "
+			       "but not supported by secondary CPU\n");
+			BUG();
+		}
 	}
 
 	/* Set PWT to Write-Combining. All other bits stay the same */
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 09737c8af074..15df1baee100 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -21,6 +21,7 @@
 #include <asm/numa.h>
 #include <asm/e820.h>
 #include <asm/genapic.h>
+#include <asm/uv/uv.h>
 
 int acpi_numa __initdata;
 
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/mm/tlb.c
index f8be6f1d2e48..14c5af4d11e6 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/mm/tlb.c
@@ -1,24 +1,20 @@
 #include <linux/init.h>
 
 #include <linux/mm.h>
-#include <linux/delay.h>
 #include <linux/spinlock.h>
 #include <linux/smp.h>
-#include <linux/kernel_stat.h>
-#include <linux/mc146818rtc.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 
-#include <asm/mtrr.h>
-#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
-#include <asm/proto.h>
-#include <asm/apicdef.h>
-#include <asm/idle.h>
-#include <asm/uv/uv_hub.h>
-#include <asm/uv/uv_bau.h>
+#include <asm/apic.h>
+#include <asm/uv/uv.h>
 
-#include <mach_ipi.h>
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
+			= { &init_mm, 0, };
+
+#include <asm/genapic.h>
 /*
  * Smarter SMP flushing macros.
  * c/o Linus Torvalds.
@@ -33,7 +29,7 @@
  * To avoid global state use 8 different call vectors.
  * Each CPU uses a specific vector to trigger flushes on other
  * CPUs. Depending on the received vector the target CPUs look into
- * the right per cpu variable for the flush data.
+ * the right array slot for the flush data.
  *
  * With more than 8 CPUs they are hashed to the 8 available
  * vectors. The limited global vector space forces us to this right now.
@@ -43,18 +39,18 @@
 
 union smp_flush_state {
 	struct {
-		cpumask_t flush_cpumask;
 		struct mm_struct *flush_mm;
 		unsigned long flush_va;
 		spinlock_t tlbstate_lock;
+		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
 	};
-	char pad[SMP_CACHE_BYTES];
-} ____cacheline_aligned;
+	char pad[CONFIG_X86_INTERNODE_CACHE_BYTES];
+} ____cacheline_internodealigned_in_smp;
 
 /* State is put into the per CPU data section, but padded
    to a full cache line because other CPUs can access it and we don't
    want false sharing in the per cpu data segment. */
-static DEFINE_PER_CPU(union smp_flush_state, flush_state);
+static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];
 
 /*
  * We cannot call mmdrop() because we are in interrupt context,
@@ -62,9 +58,9 @@ static DEFINE_PER_CPU(union smp_flush_state, flush_state);
  */
 void leave_mm(int cpu)
 {
-	if (read_pda(mmu_state) == TLBSTATE_OK)
+	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
-	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
+	cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
 	load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -117,10 +113,20 @@ EXPORT_SYMBOL_GPL(leave_mm);
  * Interrupts are disabled.
  */
 
-asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
+/*
+ * FIXME: use of asmlinkage is not consistent.  On x86_64 it's noop
+ * but still used for documentation purpose but the usage is slightly
+ * inconsistent.  On x86_32, asmlinkage is regparm(0) but interrupt
+ * entry calls in with the first parameter in %eax.  Maybe define
+ * intrlinkage?
+ */
+#ifdef CONFIG_X86_64
+asmlinkage
+#endif
+void smp_invalidate_interrupt(struct pt_regs *regs)
 {
-	int cpu;
-	int sender;
+	unsigned int cpu;
+	unsigned int sender;
 	union smp_flush_state *f;
 
 	cpu = smp_processor_id();
@@ -129,9 +135,9 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 	 * Use that to determine where the sender put the data.
 	 */
 	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
-	f = &per_cpu(flush_state, sender);
+	f = &flush_state[sender];
 
-	if (!cpu_isset(cpu, f->flush_cpumask))
+	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
 		goto out;
 	/*
 	 * This was a BUG() but until someone can quote me the
@@ -142,8 +148,8 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 	 * BUG();
 	 */
 
-	if (f->flush_mm == read_pda(active_mm)) {
-		if (read_pda(mmu_state) == TLBSTATE_OK) {
+	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
+		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
 			if (f->flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
@@ -153,23 +159,21 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 	}
 out:
 	ack_APIC_irq();
-	cpu_clear(cpu, f->flush_cpumask);
+	smp_mb__before_clear_bit();
+	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
+	smp_mb__after_clear_bit();
 	inc_irq_stat(irq_tlb_count);
 }
 
-void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
-			     unsigned long va)
+static void flush_tlb_others_ipi(const struct cpumask *cpumask,
+				 struct mm_struct *mm, unsigned long va)
 {
-	int sender;
+	unsigned int sender;
 	union smp_flush_state *f;
-	cpumask_t cpumask = *cpumaskp;
-
-	if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
-		return;
 
 	/* Caller has disabled preemption */
 	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
-	f = &per_cpu(flush_state, sender);
+	f = &flush_state[sender];
 
 	/*
 	 * Could avoid this lock when
@@ -180,7 +184,8 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 
 	f->flush_mm = mm;
 	f->flush_va = va;
-	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
+	cpumask_andnot(to_cpumask(f->flush_cpumask),
+		       cpumask, cpumask_of(smp_processor_id()));
 
 	/*
 	 * Make the above memory operations globally visible before
@@ -191,9 +196,10 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);
+	apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
+		      INVALIDATE_TLB_VECTOR_START + sender);
 
-	while (!cpus_empty(f->flush_cpumask))
+	while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
 		cpu_relax();
 
 	f->flush_mm = NULL;
@@ -201,12 +207,28 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	spin_unlock(&f->tlbstate_lock);
 }
 
+void native_flush_tlb_others(const struct cpumask *cpumask,
+			     struct mm_struct *mm, unsigned long va)
+{
+	if (is_uv_system()) {
+		unsigned int cpu;
+
+		cpu = get_cpu();
+		cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
+		if (cpumask)
+			flush_tlb_others_ipi(cpumask, mm, va);
+		put_cpu();
+		return;
+	}
+	flush_tlb_others_ipi(cpumask, mm, va);
+}
+
 static int __cpuinit init_smp_flush(void)
 {
 	int i;
 
-	for_each_possible_cpu(i)
-		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
+	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
+		spin_lock_init(&flush_state[i].tlbstate_lock);
 
 	return 0;
 }
@@ -215,25 +237,18 @@ core_initcall(init_smp_flush);
 void flush_tlb_current_task(void)
 {
 	struct mm_struct *mm = current->mm;
-	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
 
 	local_flush_tlb();
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	cpumask_t cpu_mask;
-
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
 		if (current->mm)
@@ -241,8 +256,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 		else
 			leave_mm(smp_processor_id());
 	}
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -250,11 +265,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
 		if (current->mm)
@@ -263,8 +275,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 			leave_mm(smp_processor_id());
 	}
 
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, va);
+	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(&mm->cpu_vm_mask, mm, va);
 
 	preempt_enable();
 }
@@ -274,7 +286,7 @@ static void do_flush_tlb_all(void *info)
 	unsigned long cpu = smp_processor_id();
 
 	__flush_tlb_all();
-	if (read_pda(mmu_state) == TLBSTATE_LAZY)
+	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
 		leave_mm(cpu);
 }
 
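The flush machinery above multiplexes all CPUs onto NUM_INVALIDATE_TLB_VECTORS (8) IPI vectors: the sender picks slot cpu % 8, fills in flush_mm/flush_va/flush_cpumask, raises vector START + slot, and spins until every target clears its bit. A user-space model of just the slot selection (the vector base value here is made up):

#include <stdio.h>

#define NUM_INVALIDATE_TLB_VECTORS	8
#define INVALIDATE_TLB_VECTOR_START	0x20	/* illustrative base, not the real one */

int main(void)
{
	unsigned int cpu;

	/* CPUs 0..9 hash onto the 8 available flush slots/vectors */
	for (cpu = 0; cpu < 10; cpu++) {
		unsigned int sender = cpu % NUM_INVALIDATE_TLB_VECTORS;
		printf("cpu %u -> flush_state[%u], vector 0x%x\n",
		       cpu, sender, INVALIDATE_TLB_VECTOR_START + sender);
	}
	return 0;
}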
diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
index 2089354968a2..5601e829c387 100644
--- a/arch/x86/pci/numaq_32.c
+++ b/arch/x86/pci/numaq_32.c
@@ -5,7 +5,7 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/nodemask.h>
-#include <mach_apic.h>
+#include <asm/genapic.h>
 #include <asm/mpspec.h>
 #include <asm/pci_x86.h>
 
@@ -18,10 +18,6 @@
 
 #define QUADLOCAL2BUS(quad,local)	(quad_local_to_mp_bus_id[quad][local])
 
-/* Where the IO area was mapped on multiquad, always 0 otherwise */
-void *xquad_portio;
-EXPORT_SYMBOL(xquad_portio);
-
 #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
 
 #define PCI_CONF1_MQ_ADDRESS(bus, devfn, reg) \
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index b82cae970dfd..1c975cc9839e 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -7,7 +7,7 @@
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <asm/pci_x86.h>
-#include <asm/mach-default/pci-functions.h>
+#include <asm/pci-functions.h>
 
 /* BIOS32 signature: "_32_" */
 #define BIOS32_SIGNATURE	(('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 4d6ef0a336d6..16a9020c8f11 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -38,7 +38,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
 	$(call if_changed,objcopy)
 
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
-       $(filter -g%,$(KBUILD_CFLAGS))
+       $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector)
 
 $(vobjs): KBUILD_CFLAGS += $(CFL)
 
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 6dcefba7836f..3b767d03fd6a 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -6,7 +6,8 @@ CFLAGS_REMOVE_irq.o = -pg
 endif
 
 obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
-			time.o xen-asm_$(BITS).o grant-table.o suspend.o
+			time.o xen-asm.o xen-asm_$(BITS).o \
+			grant-table.o suspend.o
 
 obj-$(CONFIG_SMP)	+= smp.o spinlock.o
 obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
\ No newline at end of file
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index bea215230b20..95ff6a0e942a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -61,40 +61,13 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
61enum xen_domain_type xen_domain_type = XEN_NATIVE; 61enum xen_domain_type xen_domain_type = XEN_NATIVE;
62EXPORT_SYMBOL_GPL(xen_domain_type); 62EXPORT_SYMBOL_GPL(xen_domain_type);
63 63
64/*
65 * Identity map, in addition to plain kernel map. This needs to be
66 * large enough to allocate page table pages to allocate the rest.
67 * Each page can map 2MB.
68 */
69static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
70
71#ifdef CONFIG_X86_64
72/* l3 pud for userspace vsyscall mapping */
73static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
74#endif /* CONFIG_X86_64 */
75
76/*
77 * Note about cr3 (pagetable base) values:
78 *
79 * xen_cr3 contains the current logical cr3 value; it contains the
80 * last set cr3. This may not be the current effective cr3, because
81 * its update may be lazily deferred. However, a vcpu looking
82 * at its own cr3 can use this value knowing that everything will
83 * be self-consistent.
84 *
85 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
86 * hypercall to set the vcpu cr3 is complete (so it may be a little
87 * out of date, but it will never be set early). If one vcpu is
88 * looking at another vcpu's cr3 value, it should use this variable.
89 */
90DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
91DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
92
93struct start_info *xen_start_info; 64struct start_info *xen_start_info;
94EXPORT_SYMBOL_GPL(xen_start_info); 65EXPORT_SYMBOL_GPL(xen_start_info);
95 66
96struct shared_info xen_dummy_shared_info; 67struct shared_info xen_dummy_shared_info;
97 68
69void *xen_initial_gdt;
70
98/* 71/*
99 * Point at some empty memory to start with. We map the real shared_info 72 * Point at some empty memory to start with. We map the real shared_info
100 * page as soon as fixmap is up and running. 73 * page as soon as fixmap is up and running.
@@ -114,14 +87,7 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
114 * 87 *
115 * 0: not available, 1: available 88 * 0: not available, 1: available
116 */ 89 */
117static int have_vcpu_info_placement = 90static int have_vcpu_info_placement = 1;
118#ifdef CONFIG_X86_32
119 1
120#else
121 0
122#endif
123 ;
124
125 91
126static void xen_vcpu_setup(int cpu) 92static void xen_vcpu_setup(int cpu)
127{ 93{
@@ -237,7 +203,7 @@ static unsigned long xen_get_debugreg(int reg)
237 return HYPERVISOR_get_debugreg(reg); 203 return HYPERVISOR_get_debugreg(reg);
238} 204}
239 205
240static void xen_leave_lazy(void) 206void xen_leave_lazy(void)
241{ 207{
242 paravirt_leave_lazy(paravirt_get_lazy_mode()); 208 paravirt_leave_lazy(paravirt_get_lazy_mode());
243 xen_mc_flush(); 209 xen_mc_flush();
@@ -357,13 +323,14 @@ static void load_TLS_descriptor(struct thread_struct *t,
357static void xen_load_tls(struct thread_struct *t, unsigned int cpu) 323static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
358{ 324{
359 /* 325 /*
360 * XXX sleazy hack: If we're being called in a lazy-cpu zone, 326 * XXX sleazy hack: If we're being called in a lazy-cpu zone
361 * it means we're in a context switch, and %gs has just been 327 * and lazy gs handling is enabled, it means we're in a
362 * saved. This means we can zero it out to prevent faults on 328 * context switch, and %gs has just been saved. This means we
363 * exit from the hypervisor if the next process has no %gs. 329 * can zero it out to prevent faults on exit from the
364 * Either way, it has been saved, and the new value will get 330 * hypervisor if the next process has no %gs. Either way, it
365 * loaded properly. This will go away as soon as Xen has been 331 * has been saved, and the new value will get loaded properly.
366 * modified to not save/restore %gs for normal hypercalls. 332 * This will go away as soon as Xen has been modified to not
333 * save/restore %gs for normal hypercalls.
367 * 334 *
368 * On x86_64, this hack is not used for %gs, because gs points 335 * On x86_64, this hack is not used for %gs, because gs points
369 * to KERNEL_GS_BASE (and uses it for PDA references), so we 336 * to KERNEL_GS_BASE (and uses it for PDA references), so we
@@ -375,7 +342,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
375 */ 342 */
376 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) { 343 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
377#ifdef CONFIG_X86_32 344#ifdef CONFIG_X86_32
378 loadsegment(gs, 0); 345 lazy_load_gs(0);
379#else 346#else
380 loadsegment(fs, 0); 347 loadsegment(fs, 0);
381#endif 348#endif
@@ -598,83 +565,6 @@ static struct apic_ops xen_basic_apic_ops = {
598 565
599#endif 566#endif
600 567
601static void xen_flush_tlb(void)
602{
603 struct mmuext_op *op;
604 struct multicall_space mcs;
605
606 preempt_disable();
607
608 mcs = xen_mc_entry(sizeof(*op));
609
610 op = mcs.args;
611 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
612 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
613
614 xen_mc_issue(PARAVIRT_LAZY_MMU);
615
616 preempt_enable();
617}
618
619static void xen_flush_tlb_single(unsigned long addr)
620{
621 struct mmuext_op *op;
622 struct multicall_space mcs;
623
624 preempt_disable();
625
626 mcs = xen_mc_entry(sizeof(*op));
627 op = mcs.args;
628 op->cmd = MMUEXT_INVLPG_LOCAL;
629 op->arg1.linear_addr = addr & PAGE_MASK;
630 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
631
632 xen_mc_issue(PARAVIRT_LAZY_MMU);
633
634 preempt_enable();
635}
636
637static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
638 unsigned long va)
639{
640 struct {
641 struct mmuext_op op;
642 cpumask_t mask;
643 } *args;
644 cpumask_t cpumask = *cpus;
645 struct multicall_space mcs;
646
647 /*
648 * A couple of (to be removed) sanity checks:
649 *
650 * - current CPU must not be in mask
651 * - mask must exist :)
652 */
653 BUG_ON(cpus_empty(cpumask));
654 BUG_ON(cpu_isset(smp_processor_id(), cpumask));
655 BUG_ON(!mm);
656
657 /* If a CPU which we ran on has gone down, OK. */
658 cpus_and(cpumask, cpumask, cpu_online_map);
659 if (cpus_empty(cpumask))
660 return;
661
662 mcs = xen_mc_entry(sizeof(*args));
663 args = mcs.args;
664 args->mask = cpumask;
665 args->op.arg2.vcpumask = &args->mask;
666
667 if (va == TLB_FLUSH_ALL) {
668 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
669 } else {
670 args->op.cmd = MMUEXT_INVLPG_MULTI;
671 args->op.arg1.linear_addr = va;
672 }
673
674 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
675
676 xen_mc_issue(PARAVIRT_LAZY_MMU);
677}
678 568
679static void xen_clts(void) 569static void xen_clts(void)
680{ 570{
@@ -700,21 +590,6 @@ static void xen_write_cr0(unsigned long cr0)
700 xen_mc_issue(PARAVIRT_LAZY_CPU); 590 xen_mc_issue(PARAVIRT_LAZY_CPU);
701} 591}
702 592
703static void xen_write_cr2(unsigned long cr2)
704{
705 x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
706}
707
708static unsigned long xen_read_cr2(void)
709{
710 return x86_read_percpu(xen_vcpu)->arch.cr2;
711}
712
713static unsigned long xen_read_cr2_direct(void)
714{
715 return x86_read_percpu(xen_vcpu_info.arch.cr2);
716}
717
718static void xen_write_cr4(unsigned long cr4) 593static void xen_write_cr4(unsigned long cr4)
719{ 594{
720 cr4 &= ~X86_CR4_PGE; 595 cr4 &= ~X86_CR4_PGE;
@@ -723,71 +598,6 @@ static void xen_write_cr4(unsigned long cr4)
723 native_write_cr4(cr4); 598 native_write_cr4(cr4);
724} 599}
725 600
726static unsigned long xen_read_cr3(void)
727{
728 return x86_read_percpu(xen_cr3);
729}
730
731static void set_current_cr3(void *v)
732{
733 x86_write_percpu(xen_current_cr3, (unsigned long)v);
734}
735
736static void __xen_write_cr3(bool kernel, unsigned long cr3)
737{
738 struct mmuext_op *op;
739 struct multicall_space mcs;
740 unsigned long mfn;
741
742 if (cr3)
743 mfn = pfn_to_mfn(PFN_DOWN(cr3));
744 else
745 mfn = 0;
746
747 WARN_ON(mfn == 0 && kernel);
748
749 mcs = __xen_mc_entry(sizeof(*op));
750
751 op = mcs.args;
752 op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
753 op->arg1.mfn = mfn;
754
755 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
756
757 if (kernel) {
758 x86_write_percpu(xen_cr3, cr3);
759
760 /* Update xen_current_cr3 once the batch has actually
761 been submitted. */
762 xen_mc_callback(set_current_cr3, (void *)cr3);
763 }
764}
765
766static void xen_write_cr3(unsigned long cr3)
767{
768 BUG_ON(preemptible());
769
770 xen_mc_batch(); /* disables interrupts */
771
772 /* Update while interrupts are disabled, so it's atomic with
773 respect to IPIs */
774 x86_write_percpu(xen_cr3, cr3);
775
776 __xen_write_cr3(true, cr3);
777
778#ifdef CONFIG_X86_64
779 {
780 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
781 if (user_pgd)
782 __xen_write_cr3(false, __pa(user_pgd));
783 else
784 __xen_write_cr3(false, 0);
785 }
786#endif
787
788 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
789}
790
791static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) 601static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
792{ 602{
793 int ret; 603 int ret;
@@ -829,185 +639,6 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
829 return ret; 639 return ret;
830} 640}
831 641
832/* Early in boot, while setting up the initial pagetable, assume
833 everything is pinned. */
834static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
835{
836#ifdef CONFIG_FLATMEM
837 BUG_ON(mem_map); /* should only be used early */
838#endif
839 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
840}
841
842/* Early release_pte assumes that all pts are pinned, since there's
843 only init_mm and anything attached to that is pinned. */
844static void xen_release_pte_init(unsigned long pfn)
845{
846 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
847}
848
849static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
850{
851 struct mmuext_op op;
852 op.cmd = cmd;
853 op.arg1.mfn = pfn_to_mfn(pfn);
854 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
855 BUG();
856}
857
858/* This needs to make sure the new pte page is pinned iff it's being
859 attached to a pinned pagetable. */
860static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
861{
862 struct page *page = pfn_to_page(pfn);
863
864 if (PagePinned(virt_to_page(mm->pgd))) {
865 SetPagePinned(page);
866
867 vm_unmap_aliases();
868 if (!PageHighMem(page)) {
869 make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
870 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
871 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
872 } else {
873 /* make sure there are no stray mappings of
874 this page */
875 kmap_flush_unused();
876 }
877 }
878}
879
880static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
881{
882 xen_alloc_ptpage(mm, pfn, PT_PTE);
883}
884
885static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
886{
887 xen_alloc_ptpage(mm, pfn, PT_PMD);
888}
889
890static int xen_pgd_alloc(struct mm_struct *mm)
891{
892 pgd_t *pgd = mm->pgd;
893 int ret = 0;
894
895 BUG_ON(PagePinned(virt_to_page(pgd)));
896
897#ifdef CONFIG_X86_64
898 {
899 struct page *page = virt_to_page(pgd);
900 pgd_t *user_pgd;
901
902 BUG_ON(page->private != 0);
903
904 ret = -ENOMEM;
905
906 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
907 page->private = (unsigned long)user_pgd;
908
909 if (user_pgd != NULL) {
910 user_pgd[pgd_index(VSYSCALL_START)] =
911 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
912 ret = 0;
913 }
914
915 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
916 }
917#endif
918
919 return ret;
920}
921
922static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
923{
924#ifdef CONFIG_X86_64
925 pgd_t *user_pgd = xen_get_user_pgd(pgd);
926
927 if (user_pgd)
928 free_page((unsigned long)user_pgd);
929#endif
930}
931
932/* This should never happen until we're OK to use struct page */
933static void xen_release_ptpage(unsigned long pfn, unsigned level)
934{
935 struct page *page = pfn_to_page(pfn);
936
937 if (PagePinned(page)) {
938 if (!PageHighMem(page)) {
939 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
940 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
941 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
942 }
943 ClearPagePinned(page);
944 }
945}
946
947static void xen_release_pte(unsigned long pfn)
948{
949 xen_release_ptpage(pfn, PT_PTE);
950}
951
952static void xen_release_pmd(unsigned long pfn)
953{
954 xen_release_ptpage(pfn, PT_PMD);
955}
956
957#if PAGETABLE_LEVELS == 4
958static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
959{
960 xen_alloc_ptpage(mm, pfn, PT_PUD);
961}
962
963static void xen_release_pud(unsigned long pfn)
964{
965 xen_release_ptpage(pfn, PT_PUD);
966}
967#endif
968
969#ifdef CONFIG_HIGHPTE
970static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
971{
972 pgprot_t prot = PAGE_KERNEL;
973
974 if (PagePinned(page))
975 prot = PAGE_KERNEL_RO;
976
977 if (0 && PageHighMem(page))
978 printk("mapping highpte %lx type %d prot %s\n",
979 page_to_pfn(page), type,
980 (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
981
982 return kmap_atomic_prot(page, type, prot);
983}
984#endif
985
986#ifdef CONFIG_X86_32
987static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
988{
989 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
990 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
991 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
992 pte_val_ma(pte));
993
994 return pte;
995}
996
997/* Init-time set_pte while constructing initial pagetables, which
998 doesn't allow RO pagetable pages to be remapped RW */
999static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
1000{
1001 pte = mask_rw_pte(ptep, pte);
1002
1003 xen_set_pte(ptep, pte);
1004}
1005#endif
1006
1007static __init void xen_pagetable_setup_start(pgd_t *base)
1008{
1009}
1010
1011void xen_setup_shared_info(void) 642void xen_setup_shared_info(void)
1012{ 643{
1013 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 644 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
@@ -1028,37 +659,6 @@ void xen_setup_shared_info(void)
1028 xen_setup_mfn_list_list(); 659 xen_setup_mfn_list_list();
1029} 660}
1030 661
1031static __init void xen_pagetable_setup_done(pgd_t *base)
1032{
1033 xen_setup_shared_info();
1034}
1035
1036static __init void xen_post_allocator_init(void)
1037{
1038 pv_mmu_ops.set_pte = xen_set_pte;
1039 pv_mmu_ops.set_pmd = xen_set_pmd;
1040 pv_mmu_ops.set_pud = xen_set_pud;
1041#if PAGETABLE_LEVELS == 4
1042 pv_mmu_ops.set_pgd = xen_set_pgd;
1043#endif
1044
1045 /* This will work as long as patching hasn't happened yet
1046 (which it hasn't) */
1047 pv_mmu_ops.alloc_pte = xen_alloc_pte;
1048 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
1049 pv_mmu_ops.release_pte = xen_release_pte;
1050 pv_mmu_ops.release_pmd = xen_release_pmd;
1051#if PAGETABLE_LEVELS == 4
1052 pv_mmu_ops.alloc_pud = xen_alloc_pud;
1053 pv_mmu_ops.release_pud = xen_release_pud;
1054#endif
1055
1056#ifdef CONFIG_X86_64
1057 SetPagePinned(virt_to_page(level3_user_vsyscall));
1058#endif
1059 xen_mark_init_mm_pinned();
1060}
1061
1062/* This is called once we have the cpu_possible_map */ 662/* This is called once we have the cpu_possible_map */
1063void xen_setup_vcpu_info_placement(void) 663void xen_setup_vcpu_info_placement(void)
1064{ 664{
@@ -1072,10 +672,10 @@ void xen_setup_vcpu_info_placement(void)
1072 if (have_vcpu_info_placement) { 672 if (have_vcpu_info_placement) {
1073 printk(KERN_INFO "Xen: using vcpu_info placement\n"); 673 printk(KERN_INFO "Xen: using vcpu_info placement\n");
1074 674
1075 pv_irq_ops.save_fl = xen_save_fl_direct; 675 pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
1076 pv_irq_ops.restore_fl = xen_restore_fl_direct; 676 pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
1077 pv_irq_ops.irq_disable = xen_irq_disable_direct; 677 pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
1078 pv_irq_ops.irq_enable = xen_irq_enable_direct; 678 pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
1079 pv_mmu_ops.read_cr2 = xen_read_cr2_direct; 679 pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
1080 } 680 }
1081} 681}
@@ -1133,49 +733,6 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
1133 return ret; 733 return ret;
1134} 734}
1135 735
1136static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
1137{
1138 pte_t pte;
1139
1140 phys >>= PAGE_SHIFT;
1141
1142 switch (idx) {
1143 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
1144#ifdef CONFIG_X86_F00F_BUG
1145 case FIX_F00F_IDT:
1146#endif
1147#ifdef CONFIG_X86_32
1148 case FIX_WP_TEST:
1149 case FIX_VDSO:
1150# ifdef CONFIG_HIGHMEM
1151 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
1152# endif
1153#else
1154 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
1155#endif
1156#ifdef CONFIG_X86_LOCAL_APIC
1157 case FIX_APIC_BASE: /* maps dummy local APIC */
1158#endif
1159 pte = pfn_pte(phys, prot);
1160 break;
1161
1162 default:
1163 pte = mfn_pte(phys, prot);
1164 break;
1165 }
1166
1167 __native_set_fixmap(idx, pte);
1168
1169#ifdef CONFIG_X86_64
1170 /* Replicate changes to map the vsyscall page into the user
1171 pagetable vsyscall mapping. */
1172 if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
1173 unsigned long vaddr = __fix_to_virt(idx);
1174 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
1175 }
1176#endif
1177}
1178
1179static const struct pv_info xen_info __initdata = { 736static const struct pv_info xen_info __initdata = {
1180 .paravirt_enabled = 1, 737 .paravirt_enabled = 1,
1181 .shared_kernel_pmd = 0, 738 .shared_kernel_pmd = 0,
@@ -1271,87 +828,6 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
1271#endif 828#endif
1272}; 829};
1273 830
1274static const struct pv_mmu_ops xen_mmu_ops __initdata = {
1275 .pagetable_setup_start = xen_pagetable_setup_start,
1276 .pagetable_setup_done = xen_pagetable_setup_done,
1277
1278 .read_cr2 = xen_read_cr2,
1279 .write_cr2 = xen_write_cr2,
1280
1281 .read_cr3 = xen_read_cr3,
1282 .write_cr3 = xen_write_cr3,
1283
1284 .flush_tlb_user = xen_flush_tlb,
1285 .flush_tlb_kernel = xen_flush_tlb,
1286 .flush_tlb_single = xen_flush_tlb_single,
1287 .flush_tlb_others = xen_flush_tlb_others,
1288
1289 .pte_update = paravirt_nop,
1290 .pte_update_defer = paravirt_nop,
1291
1292 .pgd_alloc = xen_pgd_alloc,
1293 .pgd_free = xen_pgd_free,
1294
1295 .alloc_pte = xen_alloc_pte_init,
1296 .release_pte = xen_release_pte_init,
1297 .alloc_pmd = xen_alloc_pte_init,
1298 .alloc_pmd_clone = paravirt_nop,
1299 .release_pmd = xen_release_pte_init,
1300
1301#ifdef CONFIG_HIGHPTE
1302 .kmap_atomic_pte = xen_kmap_atomic_pte,
1303#endif
1304
1305#ifdef CONFIG_X86_64
1306 .set_pte = xen_set_pte,
1307#else
1308 .set_pte = xen_set_pte_init,
1309#endif
1310 .set_pte_at = xen_set_pte_at,
1311 .set_pmd = xen_set_pmd_hyper,
1312
1313 .ptep_modify_prot_start = __ptep_modify_prot_start,
1314 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
1315
1316 .pte_val = xen_pte_val,
1317 .pte_flags = native_pte_flags,
1318 .pgd_val = xen_pgd_val,
1319
1320 .make_pte = xen_make_pte,
1321 .make_pgd = xen_make_pgd,
1322
1323#ifdef CONFIG_X86_PAE
1324 .set_pte_atomic = xen_set_pte_atomic,
1325 .set_pte_present = xen_set_pte_at,
1326 .pte_clear = xen_pte_clear,
1327 .pmd_clear = xen_pmd_clear,
1328#endif /* CONFIG_X86_PAE */
1329 .set_pud = xen_set_pud_hyper,
1330
1331 .make_pmd = xen_make_pmd,
1332 .pmd_val = xen_pmd_val,
1333
1334#if PAGETABLE_LEVELS == 4
1335 .pud_val = xen_pud_val,
1336 .make_pud = xen_make_pud,
1337 .set_pgd = xen_set_pgd_hyper,
1338
1339 .alloc_pud = xen_alloc_pte_init,
1340 .release_pud = xen_release_pte_init,
1341#endif /* PAGETABLE_LEVELS == 4 */
1342
1343 .activate_mm = xen_activate_mm,
1344 .dup_mmap = xen_dup_mmap,
1345 .exit_mmap = xen_exit_mmap,
1346
1347 .lazy_mode = {
1348 .enter = paravirt_enter_lazy_mmu,
1349 .leave = xen_leave_lazy,
1350 },
1351
1352 .set_fixmap = xen_set_fixmap,
1353};
1354
1355static void xen_reboot(int reason) 831static void xen_reboot(int reason)
1356{ 832{
1357 struct sched_shutdown r = { .reason = reason }; 833 struct sched_shutdown r = { .reason = reason };
@@ -1394,223 +870,6 @@ static const struct machine_ops __initdata xen_machine_ops = {
1394}; 870};
1395 871
1396 872
1397static void __init xen_reserve_top(void)
1398{
1399#ifdef CONFIG_X86_32
1400 unsigned long top = HYPERVISOR_VIRT_START;
1401 struct xen_platform_parameters pp;
1402
1403 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1404 top = pp.virt_start;
1405
1406 reserve_top_address(-top);
1407#endif /* CONFIG_X86_32 */
1408}
1409
1410/*
1411 * Like __va(), but returns the address in the kernel mapping (which is
1412 * all we have until the physical memory mapping has been set up).
1413 */
1414static void *__ka(phys_addr_t paddr)
1415{
1416#ifdef CONFIG_X86_64
1417 return (void *)(paddr + __START_KERNEL_map);
1418#else
1419 return __va(paddr);
1420#endif
1421}
1422
1423/* Convert a machine address to physical address */
1424static unsigned long m2p(phys_addr_t maddr)
1425{
1426 phys_addr_t paddr;
1427
1428 maddr &= PTE_PFN_MASK;
1429 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1430
1431 return paddr;
1432}
1433
1434/* Convert a machine address to kernel virtual */
1435static void *m2v(phys_addr_t maddr)
1436{
1437 return __ka(m2p(maddr));
1438}
1439
1440static void set_page_prot(void *addr, pgprot_t prot)
1441{
1442 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1443 pte_t pte = pfn_pte(pfn, prot);
1444
1445 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1446 BUG();
1447}
1448
1449static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1450{
1451 unsigned pmdidx, pteidx;
1452 unsigned ident_pte;
1453 unsigned long pfn;
1454
1455 ident_pte = 0;
1456 pfn = 0;
1457 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1458 pte_t *pte_page;
1459
1460 /* Reuse or allocate a page of ptes */
1461 if (pmd_present(pmd[pmdidx]))
1462 pte_page = m2v(pmd[pmdidx].pmd);
1463 else {
1464 /* Check for free pte pages */
1465 if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
1466 break;
1467
1468 pte_page = &level1_ident_pgt[ident_pte];
1469 ident_pte += PTRS_PER_PTE;
1470
1471 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1472 }
1473
1474 /* Install mappings */
1475 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1476 pte_t pte;
1477
1478 if (pfn > max_pfn_mapped)
1479 max_pfn_mapped = pfn;
1480
1481 if (!pte_none(pte_page[pteidx]))
1482 continue;
1483
1484 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1485 pte_page[pteidx] = pte;
1486 }
1487 }
1488
1489 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1490 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1491
1492 set_page_prot(pmd, PAGE_KERNEL_RO);
1493}
1494
1495#ifdef CONFIG_X86_64
1496static void convert_pfn_mfn(void *v)
1497{
1498 pte_t *pte = v;
1499 int i;
1500
1501 /* All levels are converted the same way, so just treat them
1502 as ptes. */
1503 for (i = 0; i < PTRS_PER_PTE; i++)
1504 pte[i] = xen_make_pte(pte[i].pte);
1505}
1506
1507/*
1508 * Set up the initial kernel pagetable.
1509 *
1510 * We can construct this by grafting the Xen provided pagetable into
1511 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1512 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1513 * means that only the kernel has a physical mapping to start with -
1514 * but that's enough to get __va working. We need to fill in the rest
1515 * of the physical mapping once some sort of allocator has been set
1516 * up.
1517 */
1518static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1519 unsigned long max_pfn)
1520{
1521 pud_t *l3;
1522 pmd_t *l2;
1523
1524 /* Zap identity mapping */
1525 init_level4_pgt[0] = __pgd(0);
1526
1527 /* Pre-constructed entries are in pfn, so convert to mfn */
1528 convert_pfn_mfn(init_level4_pgt);
1529 convert_pfn_mfn(level3_ident_pgt);
1530 convert_pfn_mfn(level3_kernel_pgt);
1531
1532 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1533 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1534
1535 memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1536 memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1537
1538 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1539 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1540 memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1541
1542 /* Set up identity map */
1543 xen_map_identity_early(level2_ident_pgt, max_pfn);
1544
1545 /* Make pagetable pieces RO */
1546 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1547 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1548 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1549 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1550 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1551 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1552
1553 /* Pin down new L4 */
1554 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1555 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1556
1557 /* Unpin Xen-provided one */
1558 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1559
1560 /* Switch over */
1561 pgd = init_level4_pgt;
1562
1563 /*
1564 * At this stage there can be no user pgd, and no page
1565 * structure to attach it to, so make sure we just set the kernel
1566 * pgd.
1567 */
1568 xen_mc_batch();
1569 __xen_write_cr3(true, __pa(pgd));
1570 xen_mc_issue(PARAVIRT_LAZY_CPU);
1571
1572 reserve_early(__pa(xen_start_info->pt_base),
1573 __pa(xen_start_info->pt_base +
1574 xen_start_info->nr_pt_frames * PAGE_SIZE),
1575 "XEN PAGETABLES");
1576
1577 return pgd;
1578}
1579#else /* !CONFIG_X86_64 */
1580static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
1581
1582static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1583 unsigned long max_pfn)
1584{
1585 pmd_t *kernel_pmd;
1586
1587 init_pg_tables_start = __pa(pgd);
1588 init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
1589 max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);
1590
1591 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
1592 memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
1593
1594 xen_map_identity_early(level2_kernel_pgt, max_pfn);
1595
1596 memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
1597 set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
1598 __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
1599
1600 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1601 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1602 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1603
1604 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1605
1606 xen_write_cr3(__pa(swapper_pg_dir));
1607
1608 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
1609
1610 return swapper_pg_dir;
1611}
1612#endif /* CONFIG_X86_64 */
1613
1614/* First C function to be called on Xen boot */ 873/* First C function to be called on Xen boot */
1615asmlinkage void __init xen_start_kernel(void) 874asmlinkage void __init xen_start_kernel(void)
1616{ 875{
@@ -1650,10 +909,18 @@ asmlinkage void __init xen_start_kernel(void)
1650 machine_ops = xen_machine_ops; 909 machine_ops = xen_machine_ops;
1651 910
1652#ifdef CONFIG_X86_64 911#ifdef CONFIG_X86_64
1653 /* Disable until direct per-cpu data access. */ 912 /*
1654 have_vcpu_info_placement = 0; 913 * Set up percpu state. We only need to do this for 64-bit
1655 x86_64_init_pda(); 914 * because 32-bit already has %fs set properly.
915 */
916 load_percpu_segment(0);
1656#endif 917#endif
918 /*
919 * The only reliable way to retain the initial address of the
920 * percpu gdt_page is to remember it here, so we can go and
921 * mark it RW later, when the initial percpu area is freed.
922 */
923 xen_initial_gdt = &per_cpu(gdt_page, 0);
1657 924
1658 xen_smp_init(); 925 xen_smp_init();
1659 926
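
The __PV_IS_CALLEE_SAVE() wrappers introduced above tag the *_direct assembly helpers as already honouring the callee-save contract, so they can be installed into the ops table without the register-saving thunk. A simplified model of the wrapping, with the struct and macro reconstructed for illustration rather than copied from the kernel:

#include <stdio.h>

/* Stand-in for the kernel's paravirt callee-save wrapper type. */
struct paravirt_callee_save {
	void *func;
};

/* Wrap a function that already preserves caller-clobbered registers. */
#define __PV_IS_CALLEE_SAVE(f) \
	((struct paravirt_callee_save) { (void *)(f) })

struct pv_irq_ops_model {
	struct paravirt_callee_save save_fl;
};

static unsigned long demo_save_fl(void)
{
	return 0x200; /* pretend IF is set */
}

int main(void)
{
	struct pv_irq_ops_model ops = {
		.save_fl = __PV_IS_CALLEE_SAVE(demo_save_fl),
	};
	unsigned long (*fl)(void) = (unsigned long (*)(void))ops.save_fl.func;

	printf("flags = %#lx\n", fl());
	return 0;
}
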
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index bb042608c602..cfd17799bd6d 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -19,27 +19,12 @@ void xen_force_evtchn_callback(void)
19 (void)HYPERVISOR_xen_version(0, NULL); 19 (void)HYPERVISOR_xen_version(0, NULL);
20} 20}
21 21
22static void __init __xen_init_IRQ(void)
23{
24 int i;
25
26 /* Create identity vector->irq map */
27 for(i = 0; i < NR_VECTORS; i++) {
28 int cpu;
29
30 for_each_possible_cpu(cpu)
31 per_cpu(vector_irq, cpu)[i] = i;
32 }
33
34 xen_init_IRQ();
35}
36
37static unsigned long xen_save_fl(void) 22static unsigned long xen_save_fl(void)
38{ 23{
39 struct vcpu_info *vcpu; 24 struct vcpu_info *vcpu;
40 unsigned long flags; 25 unsigned long flags;
41 26
42 vcpu = x86_read_percpu(xen_vcpu); 27 vcpu = percpu_read(xen_vcpu);
43 28
44 /* flag has opposite sense of mask */ 29 /* flag has opposite sense of mask */
45 flags = !vcpu->evtchn_upcall_mask; 30 flags = !vcpu->evtchn_upcall_mask;
@@ -50,6 +35,7 @@ static unsigned long xen_save_fl(void)
50 */ 35 */
51 return (-flags) & X86_EFLAGS_IF; 36 return (-flags) & X86_EFLAGS_IF;
52} 37}
38PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
53 39
54static void xen_restore_fl(unsigned long flags) 40static void xen_restore_fl(unsigned long flags)
55{ 41{
@@ -62,7 +48,7 @@ static void xen_restore_fl(unsigned long flags)
62 make sure we don't switch CPUs between getting the vcpu 48 make sure we don't switch CPUs between getting the vcpu
63 pointer and updating the mask. */ 49 pointer and updating the mask. */
64 preempt_disable(); 50 preempt_disable();
65 vcpu = x86_read_percpu(xen_vcpu); 51 vcpu = percpu_read(xen_vcpu);
66 vcpu->evtchn_upcall_mask = flags; 52 vcpu->evtchn_upcall_mask = flags;
67 preempt_enable_no_resched(); 53 preempt_enable_no_resched();
68 54
@@ -76,6 +62,7 @@ static void xen_restore_fl(unsigned long flags)
76 xen_force_evtchn_callback(); 62 xen_force_evtchn_callback();
77 } 63 }
78} 64}
65PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
79 66
80static void xen_irq_disable(void) 67static void xen_irq_disable(void)
81{ 68{
@@ -83,9 +70,10 @@ static void xen_irq_disable(void)
83 make sure we don't switch CPUs between getting the vcpu 70 make sure we don't switch CPUs between getting the vcpu
84 pointer and updating the mask. */ 71 pointer and updating the mask. */
85 preempt_disable(); 72 preempt_disable();
86 x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1; 73 percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
87 preempt_enable_no_resched(); 74 preempt_enable_no_resched();
88} 75}
76PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
89 77
90static void xen_irq_enable(void) 78static void xen_irq_enable(void)
91{ 79{
@@ -96,7 +84,7 @@ static void xen_irq_enable(void)
96 the caller is confused and is trying to re-enable interrupts 84 the caller is confused and is trying to re-enable interrupts
97 on an indeterminate processor. */ 85 on an indeterminate processor. */
98 86
99 vcpu = x86_read_percpu(xen_vcpu); 87 vcpu = percpu_read(xen_vcpu);
100 vcpu->evtchn_upcall_mask = 0; 88 vcpu->evtchn_upcall_mask = 0;
101 89
102 /* Doesn't matter if we get preempted here, because any 90 /* Doesn't matter if we get preempted here, because any
@@ -106,6 +94,7 @@ static void xen_irq_enable(void)
106 if (unlikely(vcpu->evtchn_upcall_pending)) 94 if (unlikely(vcpu->evtchn_upcall_pending))
107 xen_force_evtchn_callback(); 95 xen_force_evtchn_callback();
108} 96}
97PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
109 98
110static void xen_safe_halt(void) 99static void xen_safe_halt(void)
111{ 100{
@@ -123,11 +112,13 @@ static void xen_halt(void)
123} 112}
124 113
125static const struct pv_irq_ops xen_irq_ops __initdata = { 114static const struct pv_irq_ops xen_irq_ops __initdata = {
126 .init_IRQ = __xen_init_IRQ, 115 .init_IRQ = xen_init_IRQ,
127 .save_fl = xen_save_fl, 116
128 .restore_fl = xen_restore_fl, 117 .save_fl = PV_CALLEE_SAVE(xen_save_fl),
129 .irq_disable = xen_irq_disable, 118 .restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
130 .irq_enable = xen_irq_enable, 119 .irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
120 .irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
121
131 .safe_halt = xen_safe_halt, 122 .safe_halt = xen_safe_halt,
132 .halt = xen_halt, 123 .halt = xen_halt,
133#ifdef CONFIG_X86_64 124#ifdef CONFIG_X86_64
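
One idiom worth spelling out from the hunks above: xen_save_fl() derives an eflags-style value from the event-channel mask with `return (-flags) & X86_EFLAGS_IF;`. Since flags is 0 or 1, negating it gives 0 or all-ones, and the AND keeps exactly the IF bit, with no branch. A quick standalone check of the arithmetic:

#include <assert.h>
#include <stdio.h>

#define X86_EFLAGS_IF 0x200 /* interrupt-enable flag */

static unsigned long model_save_fl(unsigned char evtchn_upcall_mask)
{
	/* flag has opposite sense of mask */
	unsigned long flags = !evtchn_upcall_mask;

	/* 0 -> 0, 1 -> ~0UL; the AND extracts only the IF bit */
	return (-flags) & X86_EFLAGS_IF;
}

int main(void)
{
	assert(model_save_fl(0) == X86_EFLAGS_IF); /* unmasked: IF reads as set */
	assert(model_save_fl(1) == 0);             /* masked: IF reads as clear */
	puts("ok");
	return 0;
}
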
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 503c240e26c7..d2e8ed1aff3d 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -47,6 +47,7 @@
47#include <asm/tlbflush.h> 47#include <asm/tlbflush.h>
48#include <asm/fixmap.h> 48#include <asm/fixmap.h>
49#include <asm/mmu_context.h> 49#include <asm/mmu_context.h>
50#include <asm/setup.h>
50#include <asm/paravirt.h> 51#include <asm/paravirt.h>
51#include <asm/linkage.h> 52#include <asm/linkage.h>
52 53
@@ -55,6 +56,8 @@
55 56
56#include <xen/page.h> 57#include <xen/page.h>
57#include <xen/interface/xen.h> 58#include <xen/interface/xen.h>
59#include <xen/interface/version.h>
60#include <xen/hvc-console.h>
58 61
59#include "multicalls.h" 62#include "multicalls.h"
60#include "mmu.h" 63#include "mmu.h"
@@ -114,6 +117,37 @@ static inline void check_zero(void)
114 117
115#endif /* CONFIG_XEN_DEBUG_FS */ 118#endif /* CONFIG_XEN_DEBUG_FS */
116 119
120
121/*
122 * Identity map, in addition to plain kernel map. This needs to be
123 * large enough to allocate the page table pages needed to map the rest.
124 * Each page can map 2MB.
125 */
126static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
127
128#ifdef CONFIG_X86_64
129/* l3 pud for userspace vsyscall mapping */
130static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
131#endif /* CONFIG_X86_64 */
132
133/*
134 * Note about cr3 (pagetable base) values:
135 *
136 * xen_cr3 contains the current logical cr3 value; it contains the
137 * last set cr3. This may not be the current effective cr3, because
138 * its update may be lazily deferred. However, a vcpu looking
139 * at its own cr3 can use this value knowing that everything will
140 * be self-consistent.
141 *
142 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
143 * hypercall to set the vcpu cr3 is complete (so it may be a little
144 * out of date, but it will never be set early). If one vcpu is
145 * looking at another vcpu's cr3 value, it should use this variable.
146 */
147DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
148DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
149
150
117/* 151/*
118 * Just beyond the highest usermode address. STACK_TOP_MAX has a 152 * Just beyond the highest usermode address. STACK_TOP_MAX has a
119 * redzone above it, so round it up to a PGD boundary. 153 * redzone above it, so round it up to a PGD boundary.
@@ -458,28 +492,33 @@ pteval_t xen_pte_val(pte_t pte)
458{ 492{
459 return pte_mfn_to_pfn(pte.pte); 493 return pte_mfn_to_pfn(pte.pte);
460} 494}
495PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
461 496
462pgdval_t xen_pgd_val(pgd_t pgd) 497pgdval_t xen_pgd_val(pgd_t pgd)
463{ 498{
464 return pte_mfn_to_pfn(pgd.pgd); 499 return pte_mfn_to_pfn(pgd.pgd);
465} 500}
501PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
466 502
467pte_t xen_make_pte(pteval_t pte) 503pte_t xen_make_pte(pteval_t pte)
468{ 504{
469 pte = pte_pfn_to_mfn(pte); 505 pte = pte_pfn_to_mfn(pte);
470 return native_make_pte(pte); 506 return native_make_pte(pte);
471} 507}
508PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
472 509
473pgd_t xen_make_pgd(pgdval_t pgd) 510pgd_t xen_make_pgd(pgdval_t pgd)
474{ 511{
475 pgd = pte_pfn_to_mfn(pgd); 512 pgd = pte_pfn_to_mfn(pgd);
476 return native_make_pgd(pgd); 513 return native_make_pgd(pgd);
477} 514}
515PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
478 516
479pmdval_t xen_pmd_val(pmd_t pmd) 517pmdval_t xen_pmd_val(pmd_t pmd)
480{ 518{
481 return pte_mfn_to_pfn(pmd.pmd); 519 return pte_mfn_to_pfn(pmd.pmd);
482} 520}
521PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
483 522
484void xen_set_pud_hyper(pud_t *ptr, pud_t val) 523void xen_set_pud_hyper(pud_t *ptr, pud_t val)
485{ 524{
@@ -556,12 +595,14 @@ pmd_t xen_make_pmd(pmdval_t pmd)
556 pmd = pte_pfn_to_mfn(pmd); 595 pmd = pte_pfn_to_mfn(pmd);
557 return native_make_pmd(pmd); 596 return native_make_pmd(pmd);
558} 597}
598PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
559 599
560#if PAGETABLE_LEVELS == 4 600#if PAGETABLE_LEVELS == 4
561pudval_t xen_pud_val(pud_t pud) 601pudval_t xen_pud_val(pud_t pud)
562{ 602{
563 return pte_mfn_to_pfn(pud.pud); 603 return pte_mfn_to_pfn(pud.pud);
564} 604}
605PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
565 606
566pud_t xen_make_pud(pudval_t pud) 607pud_t xen_make_pud(pudval_t pud)
567{ 608{
@@ -569,6 +610,7 @@ pud_t xen_make_pud(pudval_t pud)
569 610
570 return native_make_pud(pud); 611 return native_make_pud(pud);
571} 612}
613PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
572 614
573pgd_t *xen_get_user_pgd(pgd_t *pgd) 615pgd_t *xen_get_user_pgd(pgd_t *pgd)
574{ 616{
@@ -1063,18 +1105,14 @@ static void drop_other_mm_ref(void *info)
1063 struct mm_struct *mm = info; 1105 struct mm_struct *mm = info;
1064 struct mm_struct *active_mm; 1106 struct mm_struct *active_mm;
1065 1107
1066#ifdef CONFIG_X86_64 1108 active_mm = percpu_read(cpu_tlbstate.active_mm);
1067 active_mm = read_pda(active_mm);
1068#else
1069 active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
1070#endif
1071 1109
1072 if (active_mm == mm) 1110 if (active_mm == mm)
1073 leave_mm(smp_processor_id()); 1111 leave_mm(smp_processor_id());
1074 1112
1075 /* If this cpu still has a stale cr3 reference, then make sure 1113 /* If this cpu still has a stale cr3 reference, then make sure
1076 it has been flushed. */ 1114 it has been flushed. */
1077 if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) { 1115 if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) {
1078 load_cr3(swapper_pg_dir); 1116 load_cr3(swapper_pg_dir);
1079 arch_flush_lazy_cpu_mode(); 1117 arch_flush_lazy_cpu_mode();
1080 } 1118 }
@@ -1156,6 +1194,709 @@ void xen_exit_mmap(struct mm_struct *mm)
1156 spin_unlock(&mm->page_table_lock); 1194 spin_unlock(&mm->page_table_lock);
1157} 1195}
1158 1196
1197static __init void xen_pagetable_setup_start(pgd_t *base)
1198{
1199}
1200
1201static __init void xen_pagetable_setup_done(pgd_t *base)
1202{
1203 xen_setup_shared_info();
1204}
1205
1206static void xen_write_cr2(unsigned long cr2)
1207{
1208 percpu_read(xen_vcpu)->arch.cr2 = cr2;
1209}
1210
1211static unsigned long xen_read_cr2(void)
1212{
1213 return percpu_read(xen_vcpu)->arch.cr2;
1214}
1215
1216unsigned long xen_read_cr2_direct(void)
1217{
1218 return percpu_read(xen_vcpu_info.arch.cr2);
1219}
1220
1221static void xen_flush_tlb(void)
1222{
1223 struct mmuext_op *op;
1224 struct multicall_space mcs;
1225
1226 preempt_disable();
1227
1228 mcs = xen_mc_entry(sizeof(*op));
1229
1230 op = mcs.args;
1231 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1232 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1233
1234 xen_mc_issue(PARAVIRT_LAZY_MMU);
1235
1236 preempt_enable();
1237}
1238
1239static void xen_flush_tlb_single(unsigned long addr)
1240{
1241 struct mmuext_op *op;
1242 struct multicall_space mcs;
1243
1244 preempt_disable();
1245
1246 mcs = xen_mc_entry(sizeof(*op));
1247 op = mcs.args;
1248 op->cmd = MMUEXT_INVLPG_LOCAL;
1249 op->arg1.linear_addr = addr & PAGE_MASK;
1250 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1251
1252 xen_mc_issue(PARAVIRT_LAZY_MMU);
1253
1254 preempt_enable();
1255}
1256
1257static void xen_flush_tlb_others(const struct cpumask *cpus,
1258 struct mm_struct *mm, unsigned long va)
1259{
1260 struct {
1261 struct mmuext_op op;
1262 DECLARE_BITMAP(mask, NR_CPUS);
1263 } *args;
1264 struct multicall_space mcs;
1265
1266 BUG_ON(cpumask_empty(cpus));
1267 BUG_ON(!mm);
1268
1269 mcs = xen_mc_entry(sizeof(*args));
1270 args = mcs.args;
1271 args->op.arg2.vcpumask = to_cpumask(args->mask);
1272
1273 /* Remove us, and any offline CPUs. */
1274 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1275 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1276 if (unlikely(cpumask_empty(to_cpumask(args->mask))))
1277 goto issue;
1278
1279 if (va == TLB_FLUSH_ALL) {
1280 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1281 } else {
1282 args->op.cmd = MMUEXT_INVLPG_MULTI;
1283 args->op.arg1.linear_addr = va;
1284 }
1285
1286 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1287
1288issue:
1289 xen_mc_issue(PARAVIRT_LAZY_MMU);
1290}
1291
1292static unsigned long xen_read_cr3(void)
1293{
1294 return percpu_read(xen_cr3);
1295}
1296
1297static void set_current_cr3(void *v)
1298{
1299 percpu_write(xen_current_cr3, (unsigned long)v);
1300}
1301
1302static void __xen_write_cr3(bool kernel, unsigned long cr3)
1303{
1304 struct mmuext_op *op;
1305 struct multicall_space mcs;
1306 unsigned long mfn;
1307
1308 if (cr3)
1309 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1310 else
1311 mfn = 0;
1312
1313 WARN_ON(mfn == 0 && kernel);
1314
1315 mcs = __xen_mc_entry(sizeof(*op));
1316
1317 op = mcs.args;
1318 op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1319 op->arg1.mfn = mfn;
1320
1321 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1322
1323 if (kernel) {
1324 percpu_write(xen_cr3, cr3);
1325
1326 /* Update xen_current_cr3 once the batch has actually
1327 been submitted. */
1328 xen_mc_callback(set_current_cr3, (void *)cr3);
1329 }
1330}
1331
1332static void xen_write_cr3(unsigned long cr3)
1333{
1334 BUG_ON(preemptible());
1335
1336 xen_mc_batch(); /* disables interrupts */
1337
1338 /* Update while interrupts are disabled, so it's atomic with
1339 respect to IPIs */
1340 percpu_write(xen_cr3, cr3);
1341
1342 __xen_write_cr3(true, cr3);
1343
1344#ifdef CONFIG_X86_64
1345 {
1346 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1347 if (user_pgd)
1348 __xen_write_cr3(false, __pa(user_pgd));
1349 else
1350 __xen_write_cr3(false, 0);
1351 }
1352#endif
1353
1354 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1355}
1356
1357static int xen_pgd_alloc(struct mm_struct *mm)
1358{
1359 pgd_t *pgd = mm->pgd;
1360 int ret = 0;
1361
1362 BUG_ON(PagePinned(virt_to_page(pgd)));
1363
1364#ifdef CONFIG_X86_64
1365 {
1366 struct page *page = virt_to_page(pgd);
1367 pgd_t *user_pgd;
1368
1369 BUG_ON(page->private != 0);
1370
1371 ret = -ENOMEM;
1372
1373 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1374 page->private = (unsigned long)user_pgd;
1375
1376 if (user_pgd != NULL) {
1377 user_pgd[pgd_index(VSYSCALL_START)] =
1378 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1379 ret = 0;
1380 }
1381
1382 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1383 }
1384#endif
1385
1386 return ret;
1387}
1388
1389static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1390{
1391#ifdef CONFIG_X86_64
1392 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1393
1394 if (user_pgd)
1395 free_page((unsigned long)user_pgd);
1396#endif
1397}
1398
1399#ifdef CONFIG_HIGHPTE
1400static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
1401{
1402 pgprot_t prot = PAGE_KERNEL;
1403
1404 if (PagePinned(page))
1405 prot = PAGE_KERNEL_RO;
1406
1407 if (0 && PageHighMem(page))
1408 printk("mapping highpte %lx type %d prot %s\n",
1409 page_to_pfn(page), type,
1410 (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
1411
1412 return kmap_atomic_prot(page, type, prot);
1413}
1414#endif
1415
1416#ifdef CONFIG_X86_32
1417static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
1418{
1419 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1420 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1421 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1422 pte_val_ma(pte));
1423
1424 return pte;
1425}
1426
1427/* Init-time set_pte while constructing initial pagetables, which
1428 doesn't allow RO pagetable pages to be remapped RW */
1429static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
1430{
1431 pte = mask_rw_pte(ptep, pte);
1432
1433 xen_set_pte(ptep, pte);
1434}
1435#endif
1436
1437/* Early in boot, while setting up the initial pagetable, assume
1438 everything is pinned. */
1439static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1440{
1441#ifdef CONFIG_FLATMEM
1442 BUG_ON(mem_map); /* should only be used early */
1443#endif
1444 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1445}
1446
1447/* Early release_pte assumes that all pts are pinned, since there's
1448 only init_mm and anything attached to that is pinned. */
1449static void xen_release_pte_init(unsigned long pfn)
1450{
1451 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1452}
1453
1454static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1455{
1456 struct mmuext_op op;
1457 op.cmd = cmd;
1458 op.arg1.mfn = pfn_to_mfn(pfn);
1459 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1460 BUG();
1461}
1462
1463/* This needs to make sure the new pte page is pinned iff it's being
1464 attached to a pinned pagetable. */
1465static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
1466{
1467 struct page *page = pfn_to_page(pfn);
1468
1469 if (PagePinned(virt_to_page(mm->pgd))) {
1470 SetPagePinned(page);
1471
1472 vm_unmap_aliases();
1473 if (!PageHighMem(page)) {
1474 make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
1475 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1476 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1477 } else {
1478 /* make sure there are no stray mappings of
1479 this page */
1480 kmap_flush_unused();
1481 }
1482 }
1483}
1484
1485static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1486{
1487 xen_alloc_ptpage(mm, pfn, PT_PTE);
1488}
1489
1490static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1491{
1492 xen_alloc_ptpage(mm, pfn, PT_PMD);
1493}
1494
1495/* This should never happen until we're OK to use struct page */
1496static void xen_release_ptpage(unsigned long pfn, unsigned level)
1497{
1498 struct page *page = pfn_to_page(pfn);
1499
1500 if (PagePinned(page)) {
1501 if (!PageHighMem(page)) {
1502 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1503 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1504 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1505 }
1506 ClearPagePinned(page);
1507 }
1508}
1509
1510static void xen_release_pte(unsigned long pfn)
1511{
1512 xen_release_ptpage(pfn, PT_PTE);
1513}
1514
1515static void xen_release_pmd(unsigned long pfn)
1516{
1517 xen_release_ptpage(pfn, PT_PMD);
1518}
1519
1520#if PAGETABLE_LEVELS == 4
1521static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1522{
1523 xen_alloc_ptpage(mm, pfn, PT_PUD);
1524}
1525
1526static void xen_release_pud(unsigned long pfn)
1527{
1528 xen_release_ptpage(pfn, PT_PUD);
1529}
1530#endif
1531
1532void __init xen_reserve_top(void)
1533{
1534#ifdef CONFIG_X86_32
1535 unsigned long top = HYPERVISOR_VIRT_START;
1536 struct xen_platform_parameters pp;
1537
1538 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1539 top = pp.virt_start;
1540
1541 reserve_top_address(-top);
1542#endif /* CONFIG_X86_32 */
1543}
1544
1545/*
1546 * Like __va(), but returns the address in the kernel mapping (which is
1547 * all we have until the physical memory mapping has been set up).
1548 */
1549static void *__ka(phys_addr_t paddr)
1550{
1551#ifdef CONFIG_X86_64
1552 return (void *)(paddr + __START_KERNEL_map);
1553#else
1554 return __va(paddr);
1555#endif
1556}
1557
1558/* Convert a machine address to physical address */
1559static unsigned long m2p(phys_addr_t maddr)
1560{
1561 phys_addr_t paddr;
1562
1563 maddr &= PTE_PFN_MASK;
1564 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1565
1566 return paddr;
1567}
1568
1569/* Convert a machine address to kernel virtual */
1570static void *m2v(phys_addr_t maddr)
1571{
1572 return __ka(m2p(maddr));
1573}
1574
1575static void set_page_prot(void *addr, pgprot_t prot)
1576{
1577 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1578 pte_t pte = pfn_pte(pfn, prot);
1579
1580 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1581 BUG();
1582}
1583
1584static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1585{
1586 unsigned pmdidx, pteidx;
1587 unsigned ident_pte;
1588 unsigned long pfn;
1589
1590 ident_pte = 0;
1591 pfn = 0;
1592 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1593 pte_t *pte_page;
1594
1595 /* Reuse or allocate a page of ptes */
1596 if (pmd_present(pmd[pmdidx]))
1597 pte_page = m2v(pmd[pmdidx].pmd);
1598 else {
1599 /* Check for free pte pages */
1600 if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
1601 break;
1602
1603 pte_page = &level1_ident_pgt[ident_pte];
1604 ident_pte += PTRS_PER_PTE;
1605
1606 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1607 }
1608
1609 /* Install mappings */
1610 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1611 pte_t pte;
1612
1613 if (pfn > max_pfn_mapped)
1614 max_pfn_mapped = pfn;
1615
1616 if (!pte_none(pte_page[pteidx]))
1617 continue;
1618
1619 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1620 pte_page[pteidx] = pte;
1621 }
1622 }
1623
1624 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1625 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1626
1627 set_page_prot(pmd, PAGE_KERNEL_RO);
1628}
1629
1630#ifdef CONFIG_X86_64
1631static void convert_pfn_mfn(void *v)
1632{
1633 pte_t *pte = v;
1634 int i;
1635
1636 /* All levels are converted the same way, so just treat them
1637 as ptes. */
1638 for (i = 0; i < PTRS_PER_PTE; i++)
1639 pte[i] = xen_make_pte(pte[i].pte);
1640}
1641
1642/*
1643 * Set up the initial kernel pagetable.
1644 *
1645 * We can construct this by grafting the Xen provided pagetable into
1646 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1647 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1648 * means that only the kernel has a physical mapping to start with -
1649 * but that's enough to get __va working. We need to fill in the rest
1650 * of the physical mapping once some sort of allocator has been set
1651 * up.
1652 */
1653__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1654 unsigned long max_pfn)
1655{
1656 pud_t *l3;
1657 pmd_t *l2;
1658
1659 /* Zap identity mapping */
1660 init_level4_pgt[0] = __pgd(0);
1661
1662 /* Pre-constructed entries are in pfn, so convert to mfn */
1663 convert_pfn_mfn(init_level4_pgt);
1664 convert_pfn_mfn(level3_ident_pgt);
1665 convert_pfn_mfn(level3_kernel_pgt);
1666
1667 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1668 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1669
1670 memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1671 memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1672
1673 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1674 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1675 memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1676
1677 /* Set up identity map */
1678 xen_map_identity_early(level2_ident_pgt, max_pfn);
1679
1680 /* Make pagetable pieces RO */
1681 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1682 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1683 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1684 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1685 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1686 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1687
1688 /* Pin down new L4 */
1689 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1690 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1691
1692 /* Unpin Xen-provided one */
1693 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1694
1695 /* Switch over */
1696 pgd = init_level4_pgt;
1697
1698 /*
1699 * At this stage there can be no user pgd, and no page
1700 * structure to attach it to, so make sure we just set the kernel
1701 * pgd.
1702 */
1703 xen_mc_batch();
1704 __xen_write_cr3(true, __pa(pgd));
1705 xen_mc_issue(PARAVIRT_LAZY_CPU);
1706
1707 reserve_early(__pa(xen_start_info->pt_base),
1708 __pa(xen_start_info->pt_base +
1709 xen_start_info->nr_pt_frames * PAGE_SIZE),
1710 "XEN PAGETABLES");
1711
1712 return pgd;
1713}
1714#else /* !CONFIG_X86_64 */
1715static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
1716
1717__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1718 unsigned long max_pfn)
1719{
1720 pmd_t *kernel_pmd;
1721
1722 init_pg_tables_start = __pa(pgd);
1723 init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
1724 max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);
1725
1726 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
1727 memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
1728
1729 xen_map_identity_early(level2_kernel_pgt, max_pfn);
1730
1731 memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
1732 set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
1733 __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
1734
1735 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1736 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1737 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1738
1739 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1740
1741 xen_write_cr3(__pa(swapper_pg_dir));
1742
1743 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
1744
1745 return swapper_pg_dir;
1746}
1747#endif /* CONFIG_X86_64 */
1748
1749static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
1750{
1751 pte_t pte;
1752
1753 phys >>= PAGE_SHIFT;
1754
1755 switch (idx) {
1756 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
1757#ifdef CONFIG_X86_F00F_BUG
1758 case FIX_F00F_IDT:
1759#endif
1760#ifdef CONFIG_X86_32
1761 case FIX_WP_TEST:
1762 case FIX_VDSO:
1763# ifdef CONFIG_HIGHMEM
1764 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
1765# endif
1766#else
1767 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
1768#endif
1769#ifdef CONFIG_X86_LOCAL_APIC
1770 case FIX_APIC_BASE: /* maps dummy local APIC */
1771#endif
1772 pte = pfn_pte(phys, prot);
1773 break;
1774
1775 default:
1776 pte = mfn_pte(phys, prot);
1777 break;
1778 }
1779
1780 __native_set_fixmap(idx, pte);
1781
1782#ifdef CONFIG_X86_64
1783 /* Replicate changes to map the vsyscall page into the user
1784 pagetable vsyscall mapping. */
1785 if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
1786 unsigned long vaddr = __fix_to_virt(idx);
1787 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
1788 }
1789#endif
1790}
1791
1792__init void xen_post_allocator_init(void)
1793{
1794 pv_mmu_ops.set_pte = xen_set_pte;
1795 pv_mmu_ops.set_pmd = xen_set_pmd;
1796 pv_mmu_ops.set_pud = xen_set_pud;
1797#if PAGETABLE_LEVELS == 4
1798 pv_mmu_ops.set_pgd = xen_set_pgd;
1799#endif
1800
1801 /* This will work as long as patching hasn't happened yet
1802 (which it hasn't) */
1803 pv_mmu_ops.alloc_pte = xen_alloc_pte;
1804 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
1805 pv_mmu_ops.release_pte = xen_release_pte;
1806 pv_mmu_ops.release_pmd = xen_release_pmd;
1807#if PAGETABLE_LEVELS == 4
1808 pv_mmu_ops.alloc_pud = xen_alloc_pud;
1809 pv_mmu_ops.release_pud = xen_release_pud;
1810#endif
1811
1812#ifdef CONFIG_X86_64
1813 SetPagePinned(virt_to_page(level3_user_vsyscall));
1814#endif
1815 xen_mark_init_mm_pinned();
1816}
1817
1818
1819const struct pv_mmu_ops xen_mmu_ops __initdata = {
1820 .pagetable_setup_start = xen_pagetable_setup_start,
1821 .pagetable_setup_done = xen_pagetable_setup_done,
1822
1823 .read_cr2 = xen_read_cr2,
1824 .write_cr2 = xen_write_cr2,
1825
1826 .read_cr3 = xen_read_cr3,
1827 .write_cr3 = xen_write_cr3,
1828
1829 .flush_tlb_user = xen_flush_tlb,
1830 .flush_tlb_kernel = xen_flush_tlb,
1831 .flush_tlb_single = xen_flush_tlb_single,
1832 .flush_tlb_others = xen_flush_tlb_others,
1833
1834 .pte_update = paravirt_nop,
1835 .pte_update_defer = paravirt_nop,
1836
1837 .pgd_alloc = xen_pgd_alloc,
1838 .pgd_free = xen_pgd_free,
1839
1840 .alloc_pte = xen_alloc_pte_init,
1841 .release_pte = xen_release_pte_init,
1842 .alloc_pmd = xen_alloc_pte_init,
1843 .alloc_pmd_clone = paravirt_nop,
1844 .release_pmd = xen_release_pte_init,
1845
1846#ifdef CONFIG_HIGHPTE
1847 .kmap_atomic_pte = xen_kmap_atomic_pte,
1848#endif
1849
1850#ifdef CONFIG_X86_64
1851 .set_pte = xen_set_pte,
1852#else
1853 .set_pte = xen_set_pte_init,
1854#endif
1855 .set_pte_at = xen_set_pte_at,
1856 .set_pmd = xen_set_pmd_hyper,
1857
1858 .ptep_modify_prot_start = __ptep_modify_prot_start,
1859 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
1860
1861 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
1862 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
1863
1864 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
1865 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
1866
1867#ifdef CONFIG_X86_PAE
1868 .set_pte_atomic = xen_set_pte_atomic,
1869 .set_pte_present = xen_set_pte_at,
1870 .pte_clear = xen_pte_clear,
1871 .pmd_clear = xen_pmd_clear,
1872#endif /* CONFIG_X86_PAE */
1873 .set_pud = xen_set_pud_hyper,
1874
1875 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
1876 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
1877
1878#if PAGETABLE_LEVELS == 4
1879 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
1880 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
1881 .set_pgd = xen_set_pgd_hyper,
1882
1883 .alloc_pud = xen_alloc_pte_init,
1884 .release_pud = xen_release_pte_init,
1885#endif /* PAGETABLE_LEVELS == 4 */
1886
1887 .activate_mm = xen_activate_mm,
1888 .dup_mmap = xen_dup_mmap,
1889 .exit_mmap = xen_exit_mmap,
1890
1891 .lazy_mode = {
1892 .enter = paravirt_enter_lazy_mmu,
1893 .leave = xen_leave_lazy,
1894 },
1895
1896 .set_fixmap = xen_set_fixmap,
1897};
1898
1899
1159#ifdef CONFIG_XEN_DEBUG_FS 1900#ifdef CONFIG_XEN_DEBUG_FS
1160 1901
1161static struct dentry *d_mmu_debug; 1902static struct dentry *d_mmu_debug;
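
Among the code consolidated into mmu.c above, the __ka()/m2p()/m2v() trio does early machine-to-virtual address translation before the physical memory map exists. A userspace sketch of m2p()'s arithmetic, with a mocked mfn_to_pfn() standing in for the real machine-to-physical array (the mask value and the frame offset are invented for illustration):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT   12
#define PTE_PFN_MASK 0x000ffffffffff000ULL /* assumed frame-number mask */

/* Mock lookup; the kernel reads the Xen-maintained M2P table instead. */
static unsigned long mfn_to_pfn(unsigned long mfn)
{
	return mfn - 0x100; /* pretend machine frames sit 0x100 frames higher */
}

/* Machine address -> physical address, mirroring m2p() above. */
static unsigned long m2p(uint64_t maddr)
{
	maddr &= PTE_PFN_MASK; /* drop the in-page offset and flag bits */
	return mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
}

int main(void)
{
	uint64_t maddr = 0x234567ULL;

	/* 0x234567 -> frame 0x234 -> pfn 0x134 -> paddr 0x134000 */
	printf("maddr %#llx -> paddr %#lx\n",
	       (unsigned long long)maddr, m2p(maddr));
	return 0;
}
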
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index 98d71659da5a..24d1b44a337d 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -54,4 +54,7 @@ pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t
 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                  pte_t *ptep, pte_t pte);
 
+unsigned long xen_read_cr2_direct(void);
+
+extern const struct pv_mmu_ops xen_mmu_ops;
 #endif	/* _XEN_MMU_H */
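The new extern makes the previously file-local ops table visible to the rest of arch/x86/xen, so init code elsewhere can install it rather than mmu.c doing so itself. A sketch of that declare-in-header, define-in-one-file split, collapsed into a single C file (names invented for illustration):

    #include <stdio.h>

    struct mmu_ops { const char *name; };

    /* mmu.h analogue:  extern const struct mmu_ops xen_mmu_ops_sk; */
    extern const struct mmu_ops xen_mmu_ops_sk;

    /* mmu.c analogue: the one definition of the const template */
    const struct mmu_ops xen_mmu_ops_sk = { .name = "xen" };

    /* enlighten.c analogue: the live, patchable copy of the ops */
    static struct mmu_ops pv_mmu_ops_sk;

    int main(void)
    {
            pv_mmu_ops_sk = xen_mmu_ops_sk;   /* install at init time */
            printf("%s\n", pv_mmu_ops_sk.name);
            return 0;
    }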
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index fa3e10725d98..9e565da5d1f7 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -41,7 +41,7 @@ static inline void xen_mc_issue(unsigned mode)
 		xen_mc_flush();
 
 	/* restore flags saved in xen_mc_batch */
-	local_irq_restore(x86_read_percpu(xen_mc_irq_flags));
+	local_irq_restore(percpu_read(xen_mc_irq_flags));
 }
 
 /* Set up a callback to be called when the current batch is flushed */
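The one-line change swaps the old x86_read_percpu() accessor for the unified percpu_read() API; the logic, stashing the caller's IRQ flags in a per-CPU slot when a multicall batch opens and restoring them when the batch is issued, is unchanged. A rough single-CPU model of that pairing (helper names are stand-ins, not the kernel functions):

    #include <stdio.h>

    static unsigned long xen_mc_irq_flags;   /* per-CPU slot in the kernel */

    static unsigned long irq_save_sk(void)  { return 0x200; /* pretend IF set */ }
    static void irq_restore_sk(unsigned long f) { printf("restored %#lx\n", f); }

    static void mc_batch_sk(void) { xen_mc_irq_flags = irq_save_sk(); }

    static void mc_issue_sk(void)
    {
            /* ...flush queued hypercalls here... */
            irq_restore_sk(xen_mc_irq_flags);  /* percpu_read() in the patch */
    }

    int main(void) { mc_batch_sk(); mc_issue_sk(); return 0; }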
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index c44e2069c7c7..035582ae815d 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -50,11 +50,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
  */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_resched_count++;
-#else
-	add_pda(irq_resched_count, 1);
-#endif
+	inc_irq_stat(irq_resched_count);
 
 	return IRQ_HANDLED;
 }
@@ -78,7 +74,7 @@ static __cpuinit void cpu_bringup(void)
 	xen_setup_cpu_clockevents();
 
 	cpu_set(cpu, cpu_online_map);
-	x86_write_percpu(cpu_state, CPU_ONLINE);
+	percpu_write(cpu_state, CPU_ONLINE);
 	wmb();
 
 	/* We can take interrupts now: we're officially "up". */
@@ -174,7 +170,7 @@ static void __init xen_smp_prepare_boot_cpu(void)
 
 	/* We've switched to the "real" per-cpu gdt, so make sure the
 	   old memory can be recycled */
-	make_lowmem_page_readwrite(&per_cpu_var(gdt_page));
+	make_lowmem_page_readwrite(xen_initial_gdt);
 
 	xen_setup_vcpu_info_placement();
 }
@@ -239,6 +235,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	ctxt->user_regs.ss = __KERNEL_DS;
 #ifdef CONFIG_X86_32
 	ctxt->user_regs.fs = __KERNEL_PERCPU;
+#else
+	ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
 	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
 	ctxt->user_regs.eflags = 0x1000;	/* IOPL_RING1 */
@@ -283,23 +281,14 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 	struct task_struct *idle = idle_task(cpu);
 	int rc;
 
-#ifdef CONFIG_X86_64
-	/* Allocate node local memory for AP pdas */
-	WARN_ON(cpu == 0);
-	if (cpu > 0) {
-		rc = get_local_pda(cpu);
-		if (rc)
-			return rc;
-	}
-#endif
-
-#ifdef CONFIG_X86_32
-	init_gdt(cpu);
 	per_cpu(current_task, cpu) = idle;
+#ifdef CONFIG_X86_32
 	irq_ctx_init(cpu);
 #else
-	cpu_pda(cpu)->pcurrent = idle;
 	clear_tsk_thread_flag(idle, TIF_FORK);
+	per_cpu(kernel_stack, cpu) =
+		(unsigned long)task_stack_page(idle) -
+		KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
 	xen_setup_timer(cpu);
 	xen_init_lock_cpu(cpu);
@@ -445,11 +434,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_interrupt();
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_call_count++;
-#else
-	add_pda(irq_call_count, 1);
-#endif
+	inc_irq_stat(irq_call_count);
 	irq_exit();
 
 	return IRQ_HANDLED;
@@ -459,11 +444,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_single_interrupt();
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_call_count++;
-#else
-	add_pda(irq_call_count, 1);
-#endif
+	inc_irq_stat(irq_call_count);
 	irq_exit();
 
 	return IRQ_HANDLED;
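Every hunk in this file replaces the same open-coded #ifdef pair with inc_irq_stat(), which hides whether the counter lives in a per-CPU irq_stat structure (32-bit) or, formerly, in the PDA (64-bit). A rough userspace model of what the macro abstracts (single CPU, invented names):

    #include <stdio.h>

    struct irq_cpustat { unsigned int irq_resched_count, irq_call_count; };
    static struct irq_cpustat irq_stat;   /* per-CPU in the real kernel */

    /* stand-in for inc_irq_stat(): one spelling for every configuration */
    #define inc_irq_stat_sk(member) (irq_stat.member++)

    int main(void)
    {
            inc_irq_stat_sk(irq_resched_count);
            inc_irq_stat_sk(irq_call_count);
            printf("resched=%u call=%u\n",
                   irq_stat.irq_resched_count, irq_stat.irq_call_count);
            return 0;
    }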
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 212ffe012b76..95be7b434724 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -6,6 +6,7 @@
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
+#include <asm/fixmap.h>
 
 #include "xen-ops.h"
 #include "mmu.h"
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
new file mode 100644
index 000000000000..79d7362ad6d1
--- /dev/null
+++ b/arch/x86/xen/xen-asm.S
@@ -0,0 +1,142 @@
+/*
+ * Asm versions of Xen pv-ops, suitable for either direct use or
+ * inlining.  The inline versions are the same as the direct-use
+ * versions, with the pre- and post-amble chopped off.
+ *
+ * This code is encoded for size rather than absolute efficiency, with
+ * a view to being able to inline as much as possible.
+ *
+ * We only bother with direct forms (ie, vcpu in percpu data) of the
+ * operations here; the indirect forms are better handled in C, since
+ * they're generally too large to inline anyway.
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/percpu.h>
+#include <asm/processor-flags.h>
+
+#include "xen-asm.h"
+
+/*
+ * Enable events.  This clears the event mask and tests the pending
+ * event status with one and operation.  If there are pending events,
+ * then enter the hypervisor to get them handled.
+ */
+ENTRY(xen_irq_enable_direct)
+	/* Unmask events */
+	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+
+	/*
+	 * Preempt here doesn't matter because that will deal with any
+	 * pending interrupts.  The pending check may end up being run
+	 * on the wrong CPU, but that doesn't hurt.
+	 */
+
+	/* Test for pending */
+	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
+	jz 1f
+
+2:	call check_events
+1:
+ENDPATCH(xen_irq_enable_direct)
+	ret
+	ENDPROC(xen_irq_enable_direct)
+	RELOC(xen_irq_enable_direct, 2b+1)
+
+
+/*
+ * Disabling events is simply a matter of making the event mask
+ * non-zero.
+ */
+ENTRY(xen_irq_disable_direct)
+	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+ENDPATCH(xen_irq_disable_direct)
+	ret
+	ENDPROC(xen_irq_disable_direct)
+	RELOC(xen_irq_disable_direct, 0)
+
+/*
+ * (xen_)save_fl is used to get the current interrupt enable status.
+ * Callers expect the status to be in X86_EFLAGS_IF, and other bits
+ * may be set in the return value.  We take advantage of this by
+ * making sure that X86_EFLAGS_IF has the right value (and other bits
+ * in that byte are 0), but other bits in the return value are
+ * undefined.  We need to toggle the state of the bit, because Xen and
+ * x86 use opposite senses (mask vs enable).
+ */
+ENTRY(xen_save_fl_direct)
+	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+	setz %ah
+	addb %ah, %ah
+ENDPATCH(xen_save_fl_direct)
+	ret
+	ENDPROC(xen_save_fl_direct)
+	RELOC(xen_save_fl_direct, 0)
+
+
+/*
+ * In principle the caller should be passing us a value return from
+ * xen_save_fl_direct, but for robustness sake we test only the
+ * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
+ * interrupt mask state, it checks for unmasked pending events and
+ * enters the hypervisor to get them delivered if so.
+ */
+ENTRY(xen_restore_fl_direct)
+#ifdef CONFIG_X86_64
+	testw $X86_EFLAGS_IF, %di
+#else
+	testb $X86_EFLAGS_IF>>8, %ah
+#endif
+	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+	/*
+	 * Preempt here doesn't matter because that will deal with any
+	 * pending interrupts.  The pending check may end up being run
+	 * on the wrong CPU, but that doesn't hurt.
+	 */
+
+	/* check for unmasked and pending */
+	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
+	jz 1f
+2:	call check_events
+1:
+ENDPATCH(xen_restore_fl_direct)
+	ret
+	ENDPROC(xen_restore_fl_direct)
+	RELOC(xen_restore_fl_direct, 2b+1)
+
+
+/*
+ * Force an event check by making a hypercall, but preserve regs
+ * before making the call.
+ */
+check_events:
+#ifdef CONFIG_X86_32
+	push %eax
+	push %ecx
+	push %edx
+	call xen_force_evtchn_callback
+	pop %edx
+	pop %ecx
+	pop %eax
+#else
+	push %rax
+	push %rcx
+	push %rdx
+	push %rsi
+	push %rdi
+	push %r8
+	push %r9
+	push %r10
+	push %r11
+	call xen_force_evtchn_callback
+	pop %r11
+	pop %r10
+	pop %r9
+	pop %r8
+	pop %rdi
+	pop %rsi
+	pop %rdx
+	pop %rcx
+	pop %rax
+#endif
+	ret
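The xen_save_fl_direct sequence is worth unpacking: Xen's per-vcpu mask byte is non-zero when events are masked, while callers expect X86_EFLAGS_IF (bit 9, 0x200) to be set when interrupts are enabled, so the sense must be inverted. setz %ah yields 1 when the mask byte was zero; addb %ah, %ah doubles that to 2, and since %ah is bits 8-15 of %eax the result reads back as 0x200. A C model of the same arithmetic (a sketch, not the kernel code):

    #include <stdio.h>

    #define X86_EFLAGS_IF 0x200UL

    static unsigned long save_fl_sk(unsigned char vcpu_mask)
    {
            unsigned char ah = (vcpu_mask == 0);  /* setz %ah        */
            ah += ah;                             /* addb %ah, %ah   */
            return (unsigned long)ah << 8;        /* %ah = bits 8-15 */
    }

    int main(void)
    {
            printf("%#lx %#lx\n", save_fl_sk(0), save_fl_sk(1));
            /* prints 0x200 0x0: IF set when unmasked, clear when masked */
            return 0;
    }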
diff --git a/arch/x86/xen/xen-asm.h b/arch/x86/xen/xen-asm.h
new file mode 100644
index 000000000000..465276467a47
--- /dev/null
+++ b/arch/x86/xen/xen-asm.h
@@ -0,0 +1,12 @@
+#ifndef _XEN_XEN_ASM_H
+#define _XEN_XEN_ASM_H
+
+#include <linux/linkage.h>
+
+#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
+#define ENDPATCH(x)	.globl x##_end; x##_end=.
+
+/* Pseudo-flag used for virtual NMI, which we don't implement yet */
+#define XEN_EFLAGS_NMI	0x80000000
+
+#endif
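XEN_EFLAGS_NMI claims an otherwise-unused high bit of an eflags-shaped value (bit 31 is reserved in hardware) to carry a purely software flag. A tiny C illustration of the tagging idiom (the constant is real; everything else is invented):

    #include <stdio.h>

    #define XEN_EFLAGS_NMI 0x80000000UL

    int main(void)
    {
            unsigned long flags = 0x200;   /* an ordinary eflags value */
            flags |= XEN_EFLAGS_NMI;       /* tag: virtual NMI pending */
            printf("nmi pending: %d\n", (flags & XEN_EFLAGS_NMI) != 0);
            return 0;
    }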
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 42786f59d9c0..88e15deb8b82 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -1,117 +1,43 @@
 /*
- Asm versions of Xen pv-ops, suitable for either direct use or inlining.
- The inline versions are the same as the direct-use versions, with the
- pre- and post-amble chopped off.
-
- This code is encoded for size rather than absolute efficiency,
- with a view to being able to inline as much as possible.
-
- We only bother with direct forms (ie, vcpu in pda) of the operations
- here; the indirect forms are better handled in C, since they're
- generally too large to inline anyway.
+ * Asm versions of Xen pv-ops, suitable for either direct use or
+ * inlining.  The inline versions are the same as the direct-use
+ * versions, with the pre- and post-amble chopped off.
+ *
+ * This code is encoded for size rather than absolute efficiency, with
+ * a view to being able to inline as much as possible.
+ *
+ * We only bother with direct forms (ie, vcpu in pda) of the
+ * operations here; the indirect forms are better handled in C, since
+ * they're generally too large to inline anyway.
  */
 
-#include <linux/linkage.h>
-
-#include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
-#include <asm/percpu.h>
 #include <asm/processor-flags.h>
 #include <asm/segment.h>
 
 #include <xen/interface/xen.h>
 
-#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
-#define ENDPATCH(x)	.globl x##_end; x##_end=.
-
-/* Pseudo-flag used for virtual NMI, which we don't implement yet */
-#define XEN_EFLAGS_NMI	0x80000000
-
-/*
-   Enable events.  This clears the event mask and tests the pending
-   event status with one and operation.  If there are pending
-   events, then enter the hypervisor to get them handled.
- */
-ENTRY(xen_irq_enable_direct)
-	/* Unmask events */
-	movb $0, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-
-	/* Preempt here doesn't matter because that will deal with
-	   any pending interrupts.  The pending check may end up being
-	   run on the wrong CPU, but that doesn't hurt. */
-
-	/* Test for pending */
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
-	jz 1f
-
-2:	call check_events
-1:
-ENDPATCH(xen_irq_enable_direct)
-	ret
-	ENDPROC(xen_irq_enable_direct)
-	RELOC(xen_irq_enable_direct, 2b+1)
-
-
-/*
-   Disabling events is simply a matter of making the event mask
-   non-zero.
- */
-ENTRY(xen_irq_disable_direct)
-	movb $1, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-ENDPATCH(xen_irq_disable_direct)
-	ret
-	ENDPROC(xen_irq_disable_direct)
-	RELOC(xen_irq_disable_direct, 0)
+#include "xen-asm.h"
 
 /*
-   (xen_)save_fl is used to get the current interrupt enable status.
-   Callers expect the status to be in X86_EFLAGS_IF, and other bits
-   may be set in the return value.  We take advantage of this by
-   making sure that X86_EFLAGS_IF has the right value (and other bits
-   in that byte are 0), but other bits in the return value are
-   undefined.  We need to toggle the state of the bit, because
-   Xen and x86 use opposite senses (mask vs enable).
+ * Force an event check by making a hypercall, but preserve regs
+ * before making the call.
  */
-ENTRY(xen_save_fl_direct)
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-	setz %ah
-	addb %ah,%ah
-ENDPATCH(xen_save_fl_direct)
-	ret
-	ENDPROC(xen_save_fl_direct)
-	RELOC(xen_save_fl_direct, 0)
-
-
-/*
-   In principle the caller should be passing us a value return
-   from xen_save_fl_direct, but for robustness sake we test only
-   the X86_EFLAGS_IF flag rather than the whole byte.  After
-   setting the interrupt mask state, it checks for unmasked
-   pending events and enters the hypervisor to get them delivered
-   if so.
- */
-ENTRY(xen_restore_fl_direct)
-	testb $X86_EFLAGS_IF>>8, %ah
-	setz PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-	/* Preempt here doesn't matter because that will deal with
-	   any pending interrupts.  The pending check may end up being
-	   run on the wrong CPU, but that doesn't hurt. */
-
-	/* check for unmasked and pending */
-	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
-	jz 1f
-2:	call check_events
-1:
-ENDPATCH(xen_restore_fl_direct)
+check_events:
+	push %eax
+	push %ecx
+	push %edx
+	call xen_force_evtchn_callback
+	pop %edx
+	pop %ecx
+	pop %eax
 	ret
-	ENDPROC(xen_restore_fl_direct)
-	RELOC(xen_restore_fl_direct, 2b+1)
 
 /*
- We can't use sysexit directly, because we're not running in ring0.
- But we can easily fake it up using iret.  Assuming xen_sysexit
- is jumped to with a standard stack frame, we can just strip it
- back to a standard iret frame and use iret.
+ * We can't use sysexit directly, because we're not running in ring0.
+ * But we can easily fake it up using iret.  Assuming xen_sysexit is
+ * jumped to with a standard stack frame, we can just strip it back to
+ * a standard iret frame and use iret.
  */
 ENTRY(xen_sysexit)
 	movl PT_EAX(%esp), %eax			/* Shouldn't be necessary? */
@@ -122,33 +48,31 @@ ENTRY(xen_sysexit)
 ENDPROC(xen_sysexit)
 
 /*
- This is run where a normal iret would be run, with the same stack setup:
-	8: eflags
-	4: cs
-	esp-> 0: eip
-
- This attempts to make sure that any pending events are dealt
- with on return to usermode, but there is a small window in
- which an event can happen just before entering usermode.  If
- the nested interrupt ends up setting one of the TIF_WORK_MASK
- pending work flags, they will not be tested again before
- returning to usermode.  This means that a process can end up
- with pending work, which will be unprocessed until the process
- enters and leaves the kernel again, which could be an
- unbounded amount of time.  This means that a pending signal or
- reschedule event could be indefinitely delayed.
-
- The fix is to notice a nested interrupt in the critical
- window, and if one occurs, then fold the nested interrupt into
- the current interrupt stack frame, and re-process it
- iteratively rather than recursively.  This means that it will
- exit via the normal path, and all pending work will be dealt
- with appropriately.
-
- Because the nested interrupt handler needs to deal with the
- current stack state in whatever form its in, we keep things
- simple by only using a single register which is pushed/popped
- on the stack.
+ * This is run where a normal iret would be run, with the same stack setup:
+ *	8: eflags
+ *	4: cs
+ *	esp-> 0: eip
+ *
+ * This attempts to make sure that any pending events are dealt with
+ * on return to usermode, but there is a small window in which an
+ * event can happen just before entering usermode.  If the nested
+ * interrupt ends up setting one of the TIF_WORK_MASK pending work
+ * flags, they will not be tested again before returning to
+ * usermode.  This means that a process can end up with pending work,
+ * which will be unprocessed until the process enters and leaves the
+ * kernel again, which could be an unbounded amount of time.  This
+ * means that a pending signal or reschedule event could be
+ * indefinitely delayed.
+ *
+ * The fix is to notice a nested interrupt in the critical window, and
+ * if one occurs, then fold the nested interrupt into the current
+ * interrupt stack frame, and re-process it iteratively rather than
+ * recursively.  This means that it will exit via the normal path, and
+ * all pending work will be dealt with appropriately.
+ *
+ * Because the nested interrupt handler needs to deal with the current
+ * stack state in whatever form its in, we keep things simple by only
+ * using a single register which is pushed/popped on the stack.
  */
 ENTRY(xen_iret)
 	/* test eflags for special cases */
@@ -158,13 +82,15 @@ ENTRY(xen_iret)
 	push %eax
 	ESP_OFFSET=4	# bytes pushed onto stack
 
-	/* Store vcpu_info pointer for easy access.  Do it this
-	   way to avoid having to reload %fs */
+	/*
+	 * Store vcpu_info pointer for easy access.  Do it this way to
+	 * avoid having to reload %fs
+	 */
 #ifdef CONFIG_SMP
 	GET_THREAD_INFO(%eax)
-	movl TI_cpu(%eax),%eax
-	movl __per_cpu_offset(,%eax,4),%eax
-	mov per_cpu__xen_vcpu(%eax),%eax
+	movl TI_cpu(%eax), %eax
+	movl __per_cpu_offset(,%eax,4), %eax
+	mov per_cpu__xen_vcpu(%eax), %eax
 #else
 	movl per_cpu__xen_vcpu, %eax
 #endif
@@ -172,37 +98,46 @@ ENTRY(xen_iret)
 	/* check IF state we're restoring */
 	testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp)
 
-	/* Maybe enable events.  Once this happens we could get a
-	   recursive event, so the critical region starts immediately
-	   afterwards.  However, if that happens we don't end up
-	   resuming the code, so we don't have to be worried about
-	   being preempted to another CPU. */
+	/*
+	 * Maybe enable events.  Once this happens we could get a
+	 * recursive event, so the critical region starts immediately
+	 * afterwards.  However, if that happens we don't end up
+	 * resuming the code, so we don't have to be worried about
+	 * being preempted to another CPU.
+	 */
 	setz XEN_vcpu_info_mask(%eax)
 xen_iret_start_crit:
 
 	/* check for unmasked and pending */
 	cmpw $0x0001, XEN_vcpu_info_pending(%eax)
 
-	/* If there's something pending, mask events again so we
-	   can jump back into xen_hypervisor_callback */
+	/*
+	 * If there's something pending, mask events again so we can
+	 * jump back into xen_hypervisor_callback
+	 */
 	sete XEN_vcpu_info_mask(%eax)
 
 	popl %eax
 
-	/* From this point on the registers are restored and the stack
-	   updated, so we don't need to worry about it if we're preempted */
+	/*
+	 * From this point on the registers are restored and the stack
+	 * updated, so we don't need to worry about it if we're
+	 * preempted
+	 */
 iret_restore_end:
 
-	/* Jump to hypervisor_callback after fixing up the stack.
-	   Events are masked, so jumping out of the critical
-	   region is OK. */
+	/*
+	 * Jump to hypervisor_callback after fixing up the stack.
+	 * Events are masked, so jumping out of the critical region is
+	 * OK.
+	 */
 	je xen_hypervisor_callback
 
 1:	iret
 xen_iret_end_crit:
-.section __ex_table,"a"
+.section __ex_table, "a"
 	.align 4
-	.long 1b,iret_exc
+	.long 1b, iret_exc
 .previous
 
 hyper_iret:
@@ -212,55 +147,55 @@ hyper_iret:
 	.globl xen_iret_start_crit, xen_iret_end_crit
 
 /*
- This is called by xen_hypervisor_callback in entry.S when it sees
- that the EIP at the time of interrupt was between xen_iret_start_crit
- and xen_iret_end_crit.  We're passed the EIP in %eax so we can do
- a more refined determination of what to do.
-
- The stack format at this point is:
-	----------------
-	 ss		: (ss/esp may be present if we came from usermode)
-	 esp		:
-	 eflags		}  outer exception info
-	 cs		}
-	 eip		}
-	---------------- <- edi (copy dest)
-	 eax		:  outer eax if it hasn't been restored
-	----------------
-	 eflags		}  nested exception info
-	 cs		}   (no ss/esp because we're nested
-	 eip		}    from the same ring)
-	 orig_eax	}<- esi (copy src)
-	 - - - - - - - -
-	 fs		}
-	 es		}
-	 ds		}  SAVE_ALL state
-	 eax		}
-	  :		:
-	 ebx		}<- esp
-	----------------
-
- In order to deliver the nested exception properly, we need to shift
- everything from the return addr up to the error code so it
- sits just under the outer exception info.  This means that when we
- handle the exception, we do it in the context of the outer exception
- rather than starting a new one.
-
- The only caveat is that if the outer eax hasn't been
- restored yet (ie, it's still on stack), we need to insert
- its value into the SAVE_ALL state before going on, since
- it's usermode state which we eventually need to restore.
+ * This is called by xen_hypervisor_callback in entry.S when it sees
+ * that the EIP at the time of interrupt was between
+ * xen_iret_start_crit and xen_iret_end_crit.  We're passed the EIP in
+ * %eax so we can do a more refined determination of what to do.
+ *
+ * The stack format at this point is:
+ *	----------------
+ *	 ss		: (ss/esp may be present if we came from usermode)
+ *	 esp		:
+ *	 eflags		}  outer exception info
+ *	 cs		}
+ *	 eip		}
+ *	---------------- <- edi (copy dest)
+ *	 eax		:  outer eax if it hasn't been restored
+ *	----------------
+ *	 eflags		}  nested exception info
+ *	 cs		}   (no ss/esp because we're nested
+ *	 eip		}    from the same ring)
+ *	 orig_eax	}<- esi (copy src)
+ *	 - - - - - - - -
+ *	 fs		}
+ *	 es		}
+ *	 ds		}  SAVE_ALL state
+ *	 eax		}
+ *	  :		:
+ *	 ebx		}<- esp
+ *	----------------
+ *
+ * In order to deliver the nested exception properly, we need to shift
+ * everything from the return addr up to the error code so it sits
+ * just under the outer exception info.  This means that when we
+ * handle the exception, we do it in the context of the outer
+ * exception rather than starting a new one.
+ *
+ * The only caveat is that if the outer eax hasn't been restored yet
+ * (ie, it's still on stack), we need to insert its value into the
+ * SAVE_ALL state before going on, since it's usermode state which we
+ * eventually need to restore.
  */
 ENTRY(xen_iret_crit_fixup)
 	/*
-	   Paranoia: Make sure we're really coming from kernel space.
-	   One could imagine a case where userspace jumps into the
-	   critical range address, but just before the CPU delivers a GP,
-	   it decides to deliver an interrupt instead.  Unlikely?
-	   Definitely.  Easy to avoid?  Yes.  The Intel documents
-	   explicitly say that the reported EIP for a bad jump is the
-	   jump instruction itself, not the destination, but some virtual
-	   environments get this wrong.
+	 * Paranoia: Make sure we're really coming from kernel space.
+	 * One could imagine a case where userspace jumps into the
+	 * critical range address, but just before the CPU delivers a
+	 * GP, it decides to deliver an interrupt instead.  Unlikely?
+	 * Definitely.  Easy to avoid?  Yes.  The Intel documents
+	 * explicitly say that the reported EIP for a bad jump is the
+	 * jump instruction itself, not the destination, but some
+	 * virtual environments get this wrong.
 	 */
 	movl PT_CS(%esp), %ecx
 	andl $SEGMENT_RPL_MASK, %ecx
@@ -270,15 +205,17 @@ ENTRY(xen_iret_crit_fixup)
 	lea PT_ORIG_EAX(%esp), %esi
 	lea PT_EFLAGS(%esp), %edi
 
-	/* If eip is before iret_restore_end then stack
-	   hasn't been restored yet. */
+	/*
+	 * If eip is before iret_restore_end then stack
+	 * hasn't been restored yet.
+	 */
 	cmp $iret_restore_end, %eax
 	jae 1f
 
-	movl 0+4(%edi),%eax		/* copy EAX (just above top of frame) */
+	movl 0+4(%edi), %eax		/* copy EAX (just above top of frame) */
 	movl %eax, PT_EAX(%esp)
 
-	lea ESP_OFFSET(%edi),%edi	/* move dest up over saved regs */
+	lea ESP_OFFSET(%edi), %edi	/* move dest up over saved regs */
 
 	/* set up the copy */
 1:	std
@@ -286,20 +223,6 @@ ENTRY(xen_iret_crit_fixup)
 	rep movsl
 	cld
 
-	lea 4(%edi),%esp		/* point esp to new frame */
+	lea 4(%edi), %esp		/* point esp to new frame */
 2:	jmp xen_do_upcall
 
-
-/*
-   Force an event check by making a hypercall,
-   but preserve regs before making the call.
- */
-check_events:
-	push %eax
-	push %ecx
-	push %edx
-	call xen_force_evtchn_callback
-	pop %edx
-	pop %ecx
-	pop %eax
-	ret
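The xen_iret_crit_fixup copy is the heart of the nested-interrupt fold described in the big comment: the words from the return address up to the error code are slid so they sit just under the outer exception frame, discarding the nested frame's eflags/cs/eip and making delivery iterative instead of recursive. A very rough C model of that shift (the real code copies backwards with std; rep movsl, and the layout below is invented purely for illustration):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            int stack[8] = { 10, 11, 12, 13,  /* words to keep (SAVE_ALL...) */
                             90, 91, 92,      /* nested eip/cs/eflags (drop) */
                             99 };            /* outer frame starts here     */

            /* slide the 4 kept words up over the 3 dropped ones */
            memmove(&stack[3], &stack[0], 4 * sizeof(int));

            for (int i = 3; i < 8; i++)
                    printf("%d ", stack[i]);  /* 10 11 12 13 99 */
            printf("\n");
            return 0;
    }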
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 05794c566e87..02f496a8dbaa 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -1,174 +1,45 @@
 /*
- Asm versions of Xen pv-ops, suitable for either direct use or inlining.
- The inline versions are the same as the direct-use versions, with the
- pre- and post-amble chopped off.
-
- This code is encoded for size rather than absolute efficiency,
- with a view to being able to inline as much as possible.
-
- We only bother with direct forms (ie, vcpu in pda) of the operations
- here; the indirect forms are better handled in C, since they're
- generally too large to inline anyway.
+ * Asm versions of Xen pv-ops, suitable for either direct use or
+ * inlining.  The inline versions are the same as the direct-use
+ * versions, with the pre- and post-amble chopped off.
+ *
+ * This code is encoded for size rather than absolute efficiency, with
+ * a view to being able to inline as much as possible.
+ *
+ * We only bother with direct forms (ie, vcpu in pda) of the
+ * operations here; the indirect forms are better handled in C, since
+ * they're generally too large to inline anyway.
  */
 
-#include <linux/linkage.h>
-
-#include <asm/asm-offsets.h>
-#include <asm/processor-flags.h>
 #include <asm/errno.h>
+#include <asm/percpu.h>
+#include <asm/processor-flags.h>
 #include <asm/segment.h>
 
 #include <xen/interface/xen.h>
 
-#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
-#define ENDPATCH(x)	.globl x##_end; x##_end=.
-
-/* Pseudo-flag used for virtual NMI, which we don't implement yet */
-#define XEN_EFLAGS_NMI	0x80000000
-
-#if 1
-/*
-   x86-64 does not yet support direct access to percpu variables
-   via a segment override, so we just need to make sure this code
-   never gets used
- */
-#define BUG			ud2a
-#define PER_CPU_VAR(var, off)	0xdeadbeef
-#endif
-
-/*
-   Enable events.  This clears the event mask and tests the pending
-   event status with one and operation.  If there are pending
-   events, then enter the hypervisor to get them handled.
- */
-ENTRY(xen_irq_enable_direct)
-	BUG
-
-	/* Unmask events */
-	movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-
-	/* Preempt here doesn't matter because that will deal with
-	   any pending interrupts.  The pending check may end up being
-	   run on the wrong CPU, but that doesn't hurt. */
-
-	/* Test for pending */
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
-	jz 1f
-
-2:	call check_events
-1:
-ENDPATCH(xen_irq_enable_direct)
-	ret
-	ENDPROC(xen_irq_enable_direct)
-	RELOC(xen_irq_enable_direct, 2b+1)
-
-/*
-   Disabling events is simply a matter of making the event mask
-   non-zero.
- */
-ENTRY(xen_irq_disable_direct)
-	BUG
-
-	movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-ENDPATCH(xen_irq_disable_direct)
-	ret
-	ENDPROC(xen_irq_disable_direct)
-	RELOC(xen_irq_disable_direct, 0)
-
-/*
-   (xen_)save_fl is used to get the current interrupt enable status.
-   Callers expect the status to be in X86_EFLAGS_IF, and other bits
-   may be set in the return value.  We take advantage of this by
-   making sure that X86_EFLAGS_IF has the right value (and other bits
-   in that byte are 0), but other bits in the return value are
-   undefined.  We need to toggle the state of the bit, because
-   Xen and x86 use opposite senses (mask vs enable).
- */
-ENTRY(xen_save_fl_direct)
-	BUG
-
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-	setz %ah
-	addb %ah,%ah
-ENDPATCH(xen_save_fl_direct)
-	ret
-	ENDPROC(xen_save_fl_direct)
-	RELOC(xen_save_fl_direct, 0)
-
-/*
-   In principle the caller should be passing us a value return
-   from xen_save_fl_direct, but for robustness sake we test only
-   the X86_EFLAGS_IF flag rather than the whole byte.  After
-   setting the interrupt mask state, it checks for unmasked
-   pending events and enters the hypervisor to get them delivered
-   if so.
- */
-ENTRY(xen_restore_fl_direct)
-	BUG
-
-	testb $X86_EFLAGS_IF>>8, %ah
-	setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-	/* Preempt here doesn't matter because that will deal with
-	   any pending interrupts.  The pending check may end up being
-	   run on the wrong CPU, but that doesn't hurt. */
-
-	/* check for unmasked and pending */
-	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
-	jz 1f
-2:	call check_events
-1:
-ENDPATCH(xen_restore_fl_direct)
-	ret
-	ENDPROC(xen_restore_fl_direct)
-	RELOC(xen_restore_fl_direct, 2b+1)
-
-
-/*
-   Force an event check by making a hypercall,
-   but preserve regs before making the call.
- */
-check_events:
-	push %rax
-	push %rcx
-	push %rdx
-	push %rsi
-	push %rdi
-	push %r8
-	push %r9
-	push %r10
-	push %r11
-	call xen_force_evtchn_callback
-	pop %r11
-	pop %r10
-	pop %r9
-	pop %r8
-	pop %rdi
-	pop %rsi
-	pop %rdx
-	pop %rcx
-	pop %rax
-	ret
+#include "xen-asm.h"
 
 ENTRY(xen_adjust_exception_frame)
-	mov 8+0(%rsp),%rcx
-	mov 8+8(%rsp),%r11
+	mov 8+0(%rsp), %rcx
+	mov 8+8(%rsp), %r11
 	ret $16
 
 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
 /*
- Xen64 iret frame:
-
-	ss
-	rsp
-	rflags
-	cs
-	rip		<-- standard iret frame
-
-	flags
-
-	rcx		}
-	r11		}<-- pushed by hypercall page
-rsp -> rax		}
+ * Xen64 iret frame:
+ *
+ *	ss
+ *	rsp
+ *	rflags
+ *	cs
+ *	rip		<-- standard iret frame
+ *
+ *	flags
+ *
+ *	rcx		}
+ *	r11		}<-- pushed by hypercall page
+ * rsp->rax		}
  */
 ENTRY(xen_iret)
 	pushq $0
@@ -177,8 +48,8 @@ ENDPATCH(xen_iret)
 RELOC(xen_iret, 1b+1)
 
 /*
- sysexit is not used for 64-bit processes, so it's
- only ever used to return to 32-bit compat userspace.
+ * sysexit is not used for 64-bit processes, so it's only ever used to
+ * return to 32-bit compat userspace.
  */
 ENTRY(xen_sysexit)
 	pushq $__USER32_DS
@@ -193,13 +64,15 @@ ENDPATCH(xen_sysexit)
 RELOC(xen_sysexit, 1b+1)
 
 ENTRY(xen_sysret64)
-	/* We're already on the usermode stack at this point, but still
-	   with the kernel gs, so we can easily switch back */
-	movq %rsp, %gs:pda_oldrsp
-	movq %gs:pda_kernelstack,%rsp
+	/*
+	 * We're already on the usermode stack at this point, but
+	 * still with the kernel gs, so we can easily switch back
+	 */
+	movq %rsp, PER_CPU_VAR(old_rsp)
+	movq PER_CPU_VAR(kernel_stack), %rsp
 
 	pushq $__USER_DS
-	pushq %gs:pda_oldrsp
+	pushq PER_CPU_VAR(old_rsp)
 	pushq %r11
 	pushq $__USER_CS
 	pushq %rcx
@@ -210,13 +83,15 @@ ENDPATCH(xen_sysret64)
 RELOC(xen_sysret64, 1b+1)
 
 ENTRY(xen_sysret32)
-	/* We're already on the usermode stack at this point, but still
-	   with the kernel gs, so we can easily switch back */
-	movq %rsp, %gs:pda_oldrsp
-	movq %gs:pda_kernelstack, %rsp
+	/*
+	 * We're already on the usermode stack at this point, but
+	 * still with the kernel gs, so we can easily switch back
+	 */
+	movq %rsp, PER_CPU_VAR(old_rsp)
+	movq PER_CPU_VAR(kernel_stack), %rsp
 
 	pushq $__USER32_DS
-	pushq %gs:pda_oldrsp
+	pushq PER_CPU_VAR(old_rsp)
 	pushq %r11
 	pushq $__USER32_CS
 	pushq %rcx
@@ -227,28 +102,27 @@ ENDPATCH(xen_sysret32)
 RELOC(xen_sysret32, 1b+1)
 
 /*
- Xen handles syscall callbacks much like ordinary exceptions,
- which means we have:
- - kernel gs
- - kernel rsp
- - an iret-like stack frame on the stack (including rcx and r11):
-	ss
-	rsp
-	rflags
-	cs
-	rip
-	r11
- rsp->	rcx
-
- In all the entrypoints, we undo all that to make it look
- like a CPU-generated syscall/sysenter and jump to the normal
- entrypoint.
+ * Xen handles syscall callbacks much like ordinary exceptions, which
+ * means we have:
+ * - kernel gs
+ * - kernel rsp
+ * - an iret-like stack frame on the stack (including rcx and r11):
+ *	ss
+ *	rsp
+ *	rflags
+ *	cs
+ *	rip
+ *	r11
+ * rsp->rcx
+ *
+ * In all the entrypoints, we undo all that to make it look like a
+ * CPU-generated syscall/sysenter and jump to the normal entrypoint.
  */
 
 .macro undo_xen_syscall
-	mov 0*8(%rsp),%rcx
-	mov 1*8(%rsp),%r11
-	mov 5*8(%rsp),%rsp
+	mov 0*8(%rsp), %rcx
+	mov 1*8(%rsp), %r11
+	mov 5*8(%rsp), %rsp
 .endm
 
 /* Normal 64-bit system call target */
@@ -275,7 +149,7 @@ ENDPROC(xen_sysenter_target)
 
 ENTRY(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
-	lea 16(%rsp), %rsp	/* strip %rcx,%r11 */
+	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
 	mov $-ENOSYS, %rax
 	pushq $VGCF_in_syscall
 	jmp hypercall_iret
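Both sysret paths now go through two per-CPU variables instead of PDA fields: the user stack pointer is stashed in old_rsp, %rsp is pointed at kernel_stack, and old_rsp is later pushed as the user-rsp slot of the iret frame. A plain-C model of that dance (variable names mirror the patch, but this is a sketch, not the kernel API):

    #include <stdio.h>

    static unsigned long old_rsp;                   /* PER_CPU_VAR(old_rsp)      */
    static unsigned long kernel_stack = 0xffff8000; /* PER_CPU_VAR(kernel_stack) */

    int main(void)
    {
            unsigned long rsp = 0x7fff0000;  /* usermode stack on entry */

            old_rsp = rsp;        /* movq %rsp, PER_CPU_VAR(old_rsp)      */
            rsp = kernel_stack;   /* movq PER_CPU_VAR(kernel_stack), %rsp */

            /* ...build the iret frame, pushing old_rsp as the user rsp... */
            printf("kernel rsp=%#lx saved user rsp=%#lx\n", rsp, old_rsp);
            return 0;
    }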
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index c1f8faf0a2c5..2f5ef2632ea2 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -10,9 +10,12 @@
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];
 
+extern void *xen_initial_gdt;
+
 struct trap_info;
 void xen_copy_trap_info(struct trap_info *traps);
 
+DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 DECLARE_PER_CPU(unsigned long, xen_cr3);
 DECLARE_PER_CPU(unsigned long, xen_current_cr3);
 
@@ -22,6 +25,13 @@ extern struct shared_info *HYPERVISOR_shared_info;
 
 void xen_setup_mfn_list_list(void);
 void xen_setup_shared_info(void);
+void xen_setup_machphys_mapping(void);
+pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
+void xen_ident_map_ISA(void);
+void xen_reserve_top(void);
+
+void xen_leave_lazy(void);
+void xen_post_allocator_init(void);
 
 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
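Each DECLARE_PER_CPU added here pairs with a DEFINE_PER_CPU in some .c file: the header only makes the per-CPU variable's type and name visible, while the definition allocates it. A plain-C analogue of that declare/define split, collapsed into one file (sketch names only):

    #include <stdio.h>

    /* xen-ops.h analogue: DECLARE_PER_CPU(unsigned long, xen_cr3) */
    extern unsigned long xen_cr3_sk;

    /* some .c file analogue: DEFINE_PER_CPU(unsigned long, xen_cr3) */
    unsigned long xen_cr3_sk;

    int main(void)
    {
            xen_cr3_sk = 0x1000;
            printf("cr3=%#lx\n", xen_cr3_sk);
            return 0;
    }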