aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/Kconfig618
-rw-r--r--arch/x86/Kconfig.cpu80
-rw-r--r--arch/x86/Kconfig.debug50
-rw-r--r--arch/x86/Makefile28
-rw-r--r--arch/x86/configs/i386_defconfig1
-rw-r--r--arch/x86/configs/x86_64_defconfig1
-rw-r--r--arch/x86/ia32/ia32_signal.c365
-rw-r--r--arch/x86/ia32/ia32entry.S8
-rw-r--r--arch/x86/include/asm/apic.h74
-rw-r--r--arch/x86/include/asm/apicnum.h12
-rw-r--r--arch/x86/include/asm/apm.h (renamed from arch/x86/include/asm/mach-default/apm.h)0
-rw-r--r--arch/x86/include/asm/bigsmp/apic.h155
-rw-r--r--arch/x86/include/asm/bigsmp/apicdef.h13
-rw-r--r--arch/x86/include/asm/bigsmp/ipi.h22
-rw-r--r--arch/x86/include/asm/cpu.h17
-rw-r--r--arch/x86/include/asm/cpumask.h32
-rw-r--r--arch/x86/include/asm/current.h24
-rw-r--r--arch/x86/include/asm/do_timer.h (renamed from arch/x86/include/asm/mach-default/do_timer.h)0
-rw-r--r--arch/x86/include/asm/entry_arch.h (renamed from arch/x86/include/asm/mach-default/entry_arch.h)25
-rw-r--r--arch/x86/include/asm/es7000/apic.h242
-rw-r--r--arch/x86/include/asm/es7000/apicdef.h13
-rw-r--r--arch/x86/include/asm/es7000/ipi.h22
-rw-r--r--arch/x86/include/asm/es7000/mpparse.h29
-rw-r--r--arch/x86/include/asm/es7000/wakecpu.h37
-rw-r--r--arch/x86/include/asm/genapic.h262
-rw-r--r--arch/x86/include/asm/genapic_32.h148
-rw-r--r--arch/x86/include/asm/genapic_64.h66
-rw-r--r--arch/x86/include/asm/hardirq.h49
-rw-r--r--arch/x86/include/asm/hardirq_32.h30
-rw-r--r--arch/x86/include/asm/hardirq_64.h25
-rw-r--r--arch/x86/include/asm/hw_irq.h24
-rw-r--r--arch/x86/include/asm/io.h2
-rw-r--r--arch/x86/include/asm/io_apic.h41
-rw-r--r--arch/x86/include/asm/ipi.h77
-rw-r--r--arch/x86/include/asm/irq.h4
-rw-r--r--arch/x86/include/asm/irq_regs.h36
-rw-r--r--arch/x86/include/asm/irq_regs_32.h31
-rw-r--r--arch/x86/include/asm/irq_regs_64.h1
-rw-r--r--arch/x86/include/asm/irq_vectors.h209
-rw-r--r--arch/x86/include/asm/mach-default/mach_apic.h168
-rw-r--r--arch/x86/include/asm/mach-default/mach_apicdef.h24
-rw-r--r--arch/x86/include/asm/mach-default/mach_ipi.h64
-rw-r--r--arch/x86/include/asm/mach-default/mach_mpparse.h17
-rw-r--r--arch/x86/include/asm/mach-default/mach_mpspec.h12
-rw-r--r--arch/x86/include/asm/mach-default/mach_wakecpu.h41
-rw-r--r--arch/x86/include/asm/mach-generic/gpio.h15
-rw-r--r--arch/x86/include/asm/mach-generic/mach_apic.h35
-rw-r--r--arch/x86/include/asm/mach-generic/mach_apicdef.h11
-rw-r--r--arch/x86/include/asm/mach-generic/mach_ipi.h10
-rw-r--r--arch/x86/include/asm/mach-generic/mach_mpparse.h9
-rw-r--r--arch/x86/include/asm/mach-generic/mach_mpspec.h12
-rw-r--r--arch/x86/include/asm/mach-generic/mach_wakecpu.h12
-rw-r--r--arch/x86/include/asm/mach-rdc321x/gpio.h60
-rw-r--r--arch/x86/include/asm/mach_timer.h (renamed from arch/x86/include/asm/mach-default/mach_timer.h)0
-rw-r--r--arch/x86/include/asm/mach_traps.h (renamed from arch/x86/include/asm/mach-default/mach_traps.h)0
-rw-r--r--arch/x86/include/asm/mmu_context.h63
-rw-r--r--arch/x86/include/asm/mmu_context_32.h55
-rw-r--r--arch/x86/include/asm/mmu_context_64.h54
-rw-r--r--arch/x86/include/asm/mpspec.h35
-rw-r--r--arch/x86/include/asm/mpspec_def.h23
-rw-r--r--arch/x86/include/asm/numaq.h2
-rw-r--r--arch/x86/include/asm/numaq/apic.h142
-rw-r--r--arch/x86/include/asm/numaq/apicdef.h14
-rw-r--r--arch/x86/include/asm/numaq/ipi.h22
-rw-r--r--arch/x86/include/asm/numaq/mpparse.h6
-rw-r--r--arch/x86/include/asm/numaq/wakecpu.h45
-rw-r--r--arch/x86/include/asm/page.h3
-rw-r--r--arch/x86/include/asm/page_64.h4
-rw-r--r--arch/x86/include/asm/paravirt.h463
-rw-r--r--arch/x86/include/asm/pat.h4
-rw-r--r--arch/x86/include/asm/pci-functions.h (renamed from arch/x86/include/asm/mach-default/pci-functions.h)0
-rw-r--r--arch/x86/include/asm/pda.h137
-rw-r--r--arch/x86/include/asm/percpu.h153
-rw-r--r--arch/x86/include/asm/pgtable.h38
-rw-r--r--arch/x86/include/asm/pgtable_64.h1
-rw-r--r--arch/x86/include/asm/prctl.h4
-rw-r--r--arch/x86/include/asm/processor.h22
-rw-r--r--arch/x86/include/asm/proto.h4
-rw-r--r--arch/x86/include/asm/rdc321x_defs.h (renamed from arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h)0
-rw-r--r--arch/x86/include/asm/setup.h9
-rw-r--r--arch/x86/include/asm/setup_arch.h (renamed from arch/x86/include/asm/mach-default/setup_arch.h)0
-rw-r--r--arch/x86/include/asm/smp.h69
-rw-r--r--arch/x86/include/asm/smpboot_hooks.h (renamed from arch/x86/include/asm/mach-default/smpboot_hooks.h)6
-rw-r--r--arch/x86/include/asm/spinlock.h69
-rw-r--r--arch/x86/include/asm/stackprotector.h38
-rw-r--r--arch/x86/include/asm/summit/apic.h202
-rw-r--r--arch/x86/include/asm/summit/apicdef.h13
-rw-r--r--arch/x86/include/asm/summit/ipi.h26
-rw-r--r--arch/x86/include/asm/summit/mpparse.h109
-rw-r--r--arch/x86/include/asm/system.h29
-rw-r--r--arch/x86/include/asm/thread_info.h21
-rw-r--r--arch/x86/include/asm/tlbflush.h17
-rw-r--r--arch/x86/include/asm/topology.h31
-rw-r--r--arch/x86/include/asm/trampoline.h1
-rw-r--r--arch/x86/include/asm/uaccess.h115
-rw-r--r--arch/x86/include/asm/uv/uv.h33
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h1
-rw-r--r--arch/x86/include/asm/voyager.h42
-rw-r--r--arch/x86/include/asm/xen/events.h6
-rw-r--r--arch/x86/kernel/Makefile25
-rw-r--r--arch/x86/kernel/acpi/boot.c137
-rw-r--r--arch/x86/kernel/acpi/sleep.c1
-rw-r--r--arch/x86/kernel/apic.c142
-rw-r--r--arch/x86/kernel/apm_32.c2
-rw-r--r--arch/x86/kernel/asm-offsets_64.c11
-rw-r--r--arch/x86/kernel/bigsmp_32.c266
-rw-r--r--arch/x86/kernel/cpu/addon_cpuid_features.c54
-rw-r--r--arch/x86/kernel/cpu/amd.c2
-rw-r--r--arch/x86/kernel/cpu/common.c238
-rw-r--r--arch/x86/kernel/cpu/intel.c14
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c63
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd_64.c21
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_intel_64.c1
-rw-r--r--arch/x86/kernel/crash.c4
-rw-r--r--arch/x86/kernel/dumpstack_64.c35
-rw-r--r--arch/x86/kernel/efi.c2
-rw-r--r--arch/x86/kernel/efi_64.c1
-rw-r--r--arch/x86/kernel/entry_32.S8
-rw-r--r--arch/x86/kernel/entry_64.S47
-rw-r--r--arch/x86/kernel/es7000_32.c468
-rw-r--r--arch/x86/kernel/genapic_64.c24
-rw-r--r--arch/x86/kernel/genapic_flat_64.c176
-rw-r--r--arch/x86/kernel/genx2apic_cluster.c133
-rw-r--r--arch/x86/kernel/genx2apic_phys.c125
-rw-r--r--arch/x86/kernel/genx2apic_uv_x.c112
-rw-r--r--arch/x86/kernel/head64.c23
-rw-r--r--arch/x86/kernel/head_32.S19
-rw-r--r--arch/x86/kernel/head_64.S36
-rw-r--r--arch/x86/kernel/io_apic.c423
-rw-r--r--arch/x86/kernel/ipi.c176
-rw-r--r--arch/x86/kernel/irq.c44
-rw-r--r--arch/x86/kernel/irq_32.c33
-rw-r--r--arch/x86/kernel/irq_64.c43
-rw-r--r--arch/x86/kernel/irqinit_32.c23
-rw-r--r--arch/x86/kernel/kgdb.c4
-rw-r--r--arch/x86/kernel/microcode_intel.c10
-rw-r--r--arch/x86/kernel/module_32.c6
-rw-r--r--arch/x86/kernel/module_64.c32
-rw-r--r--arch/x86/kernel/mpparse.c178
-rw-r--r--arch/x86/kernel/msr.c2
-rw-r--r--arch/x86/kernel/nmi.c12
-rw-r--r--arch/x86/kernel/numaq_32.c307
-rw-r--r--arch/x86/kernel/paravirt-spinlocks.c10
-rw-r--r--arch/x86/kernel/paravirt.c55
-rw-r--r--arch/x86/kernel/paravirt_patch_32.c12
-rw-r--r--arch/x86/kernel/paravirt_patch_64.c15
-rw-r--r--arch/x86/kernel/probe_32.c411
-rw-r--r--arch/x86/kernel/probe_roms_32.c2
-rw-r--r--arch/x86/kernel/process.c2
-rw-r--r--arch/x86/kernel/process_32.c6
-rw-r--r--arch/x86/kernel/process_64.c43
-rw-r--r--arch/x86/kernel/reboot.c5
-rw-r--r--arch/x86/kernel/setup.c32
-rw-r--r--arch/x86/kernel/setup_percpu.c412
-rw-r--r--arch/x86/kernel/signal.c291
-rw-r--r--arch/x86/kernel/smp.c15
-rw-r--r--arch/x86/kernel/smpboot.c117
-rw-r--r--arch/x86/kernel/smpcommon.c30
-rw-r--r--arch/x86/kernel/stacktrace.c2
-rw-r--r--arch/x86/kernel/summit_32.c416
-rw-r--r--arch/x86/kernel/time_32.c2
-rw-r--r--arch/x86/kernel/tlb_32.c256
-rw-r--r--arch/x86/kernel/tlb_uv.c72
-rw-r--r--arch/x86/kernel/traps.c3
-rw-r--r--arch/x86/kernel/tsc.c2
-rw-r--r--arch/x86/kernel/visws_quirks.c6
-rw-r--r--arch/x86/kernel/vmi_32.c9
-rw-r--r--arch/x86/kernel/vmiclock_32.c2
-rw-r--r--arch/x86/kernel/vmlinux_32.lds.S9
-rw-r--r--arch/x86/kernel/vmlinux_64.lds.S27
-rw-r--r--arch/x86/kernel/vsmp_64.c12
-rw-r--r--arch/x86/kernel/x8664_ksyms_64.c2
-rw-r--r--arch/x86/lguest/boot.c13
-rw-r--r--arch/x86/mach-default/Makefile5
-rw-r--r--arch/x86/mach-default/setup.c174
-rw-r--r--arch/x86/mach-generic/Makefile11
-rw-r--r--arch/x86/mach-generic/bigsmp.c60
-rw-r--r--arch/x86/mach-generic/default.c27
-rw-r--r--arch/x86/mach-generic/es7000.c103
-rw-r--r--arch/x86/mach-generic/numaq.c53
-rw-r--r--arch/x86/mach-generic/probe.c152
-rw-r--r--arch/x86/mach-generic/summit.c40
-rw-r--r--arch/x86/mach-rdc321x/Makefile5
-rw-r--r--arch/x86/mach-rdc321x/gpio.c194
-rw-r--r--arch/x86/mach-rdc321x/platform.c69
-rw-r--r--arch/x86/mach-voyager/setup.c1
-rw-r--r--arch/x86/mach-voyager/voyager_smp.c9
-rw-r--r--arch/x86/mm/Makefile2
-rw-r--r--arch/x86/mm/extable.c6
-rw-r--r--arch/x86/mm/fault.c447
-rw-r--r--arch/x86/mm/init_32.c1
-rw-r--r--arch/x86/mm/ioremap.c2
-rw-r--r--arch/x86/mm/mmap.c2
-rw-r--r--arch/x86/mm/numa_64.c217
-rw-r--r--arch/x86/mm/pat.c31
-rw-r--r--arch/x86/mm/srat_64.c1
-rw-r--r--arch/x86/mm/tlb.c (renamed from arch/x86/kernel/tlb_64.c)124
-rw-r--r--arch/x86/pci/numaq_32.c6
-rw-r--r--arch/x86/pci/pcbios.c2
-rw-r--r--arch/x86/xen/Makefile3
-rw-r--r--arch/x86/xen/enlighten.c772
-rw-r--r--arch/x86/xen/irq.c39
-rw-r--r--arch/x86/xen/mmu.c753
-rw-r--r--arch/x86/xen/mmu.h3
-rw-r--r--arch/x86/xen/multicalls.h2
-rw-r--r--arch/x86/xen/smp.c41
-rw-r--r--arch/x86/xen/suspend.c1
-rw-r--r--arch/x86/xen/xen-asm.S140
-rw-r--r--arch/x86/xen/xen-asm.h12
-rw-r--r--arch/x86/xen/xen-asm_32.S111
-rw-r--r--arch/x86/xen/xen-asm_64.S147
-rw-r--r--arch/x86/xen/xen-ops.h10
212 files changed, 7238 insertions, 7808 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9c39095b33fc..148c112c9ca4 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -5,7 +5,7 @@ mainmenu "Linux Kernel Configuration for x86"
5config 64BIT 5config 64BIT
6 bool "64-bit kernel" if ARCH = "x86" 6 bool "64-bit kernel" if ARCH = "x86"
7 default ARCH = "x86_64" 7 default ARCH = "x86_64"
8 help 8 ---help---
9 Say yes to build a 64-bit kernel - formerly known as x86_64 9 Say yes to build a 64-bit kernel - formerly known as x86_64
10 Say no to build a 32-bit kernel - formerly known as i386 10 Say no to build a 32-bit kernel - formerly known as i386
11 11
@@ -34,8 +34,8 @@ config X86
34 select HAVE_FUNCTION_TRACER 34 select HAVE_FUNCTION_TRACER
35 select HAVE_FUNCTION_GRAPH_TRACER 35 select HAVE_FUNCTION_GRAPH_TRACER
36 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 36 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
37 select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) 37 select HAVE_KVM
38 select HAVE_ARCH_KGDB if !X86_VOYAGER 38 select HAVE_ARCH_KGDB
39 select HAVE_ARCH_TRACEHOOK 39 select HAVE_ARCH_TRACEHOOK
40 select HAVE_GENERIC_DMA_COHERENT if X86_32 40 select HAVE_GENERIC_DMA_COHERENT if X86_32
41 select HAVE_EFFICIENT_UNALIGNED_ACCESS 41 select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -133,18 +133,16 @@ config ARCH_HAS_CACHE_LINE_SIZE
133 def_bool y 133 def_bool y
134 134
135config HAVE_SETUP_PER_CPU_AREA 135config HAVE_SETUP_PER_CPU_AREA
136 def_bool X86_64_SMP || (X86_SMP && !X86_VOYAGER) 136 def_bool y
137 137
138config HAVE_CPUMASK_OF_CPU_MAP 138config HAVE_CPUMASK_OF_CPU_MAP
139 def_bool X86_64_SMP 139 def_bool X86_64_SMP
140 140
141config ARCH_HIBERNATION_POSSIBLE 141config ARCH_HIBERNATION_POSSIBLE
142 def_bool y 142 def_bool y
143 depends on !SMP || !X86_VOYAGER
144 143
145config ARCH_SUSPEND_POSSIBLE 144config ARCH_SUSPEND_POSSIBLE
146 def_bool y 145 def_bool y
147 depends on !X86_VOYAGER
148 146
149config ZONE_DMA32 147config ZONE_DMA32
150 bool 148 bool
@@ -174,11 +172,6 @@ config GENERIC_PENDING_IRQ
174 depends on GENERIC_HARDIRQS && SMP 172 depends on GENERIC_HARDIRQS && SMP
175 default y 173 default y
176 174
177config X86_SMP
178 bool
179 depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
180 default y
181
182config USE_GENERIC_SMP_HELPERS 175config USE_GENERIC_SMP_HELPERS
183 def_bool y 176 def_bool y
184 depends on SMP 177 depends on SMP
@@ -194,17 +187,11 @@ config X86_64_SMP
194config X86_HT 187config X86_HT
195 bool 188 bool
196 depends on SMP 189 depends on SMP
197 depends on (X86_32 && !X86_VOYAGER) || X86_64
198 default y
199
200config X86_BIOS_REBOOT
201 bool
202 depends on !X86_VOYAGER
203 default y 190 default y
204 191
205config X86_TRAMPOLINE 192config X86_TRAMPOLINE
206 bool 193 bool
207 depends on X86_SMP || (X86_VOYAGER && SMP) || (64BIT && ACPI_SLEEP) 194 depends on SMP || (64BIT && ACPI_SLEEP)
208 default y 195 default y
209 196
210config KTIME_SCALAR 197config KTIME_SCALAR
@@ -244,14 +231,10 @@ config SMP
244 231
245 If you don't know what to do here, say N. 232 If you don't know what to do here, say N.
246 233
247config X86_HAS_BOOT_CPU_ID
248 def_bool y
249 depends on X86_VOYAGER
250
251config SPARSE_IRQ 234config SPARSE_IRQ
252 bool "Support sparse irq numbering" 235 bool "Support sparse irq numbering"
253 depends on PCI_MSI || HT_IRQ 236 depends on PCI_MSI || HT_IRQ
254 help 237 ---help---
255 This enables support for sparse irqs. This is useful for distro 238 This enables support for sparse irqs. This is useful for distro
256 kernels that want to define a high CONFIG_NR_CPUS value but still 239 kernels that want to define a high CONFIG_NR_CPUS value but still
257 want to have low kernel memory footprint on smaller machines. 240 want to have low kernel memory footprint on smaller machines.
@@ -265,137 +248,154 @@ config NUMA_MIGRATE_IRQ_DESC
265 bool "Move irq desc when changing irq smp_affinity" 248 bool "Move irq desc when changing irq smp_affinity"
266 depends on SPARSE_IRQ && NUMA 249 depends on SPARSE_IRQ && NUMA
267 default n 250 default n
268 help 251 ---help---
269 This enables moving irq_desc to cpu/node that irq will use handled. 252 This enables moving irq_desc to cpu/node that irq will use handled.
270 253
271 If you don't know what to do here, say N. 254 If you don't know what to do here, say N.
272 255
273config X86_FIND_SMP_CONFIG
274 def_bool y
275 depends on X86_MPPARSE || X86_VOYAGER
276
277config X86_MPPARSE 256config X86_MPPARSE
278 bool "Enable MPS table" if ACPI 257 bool "Enable MPS table" if ACPI
279 default y 258 default y
280 depends on X86_LOCAL_APIC 259 depends on X86_LOCAL_APIC
281 help 260 ---help---
282 For old smp systems that do not have proper acpi support. Newer systems 261 For old smp systems that do not have proper acpi support. Newer systems
283 (esp with 64bit cpus) with acpi support, MADT and DSDT will override it 262 (esp with 64bit cpus) with acpi support, MADT and DSDT will override it
284 263
285choice 264config X86_BIGSMP
286 prompt "Subarchitecture Type" 265 bool "Support for big SMP systems with more than 8 CPUs"
287 default X86_PC 266 depends on X86_32 && SMP
267 ---help---
268 This option is needed for the systems that have more than 8 CPUs
288 269
289config X86_PC 270config X86_EXTENDED_PLATFORM
290 bool "PC-compatible" 271 bool "Support for extended (non-PC) x86 platforms"
291 help 272 default y
292 Choose this option if your computer is a standard PC or compatible. 273 ---help---
274 If you disable this option then the kernel will only support
275 standard PC platforms. (which covers the vast majority of
276 systems out there.)
277
278 If you enable this option then you'll be able to select a number
279 of non-PC x86 platforms.
280
281 If you have one of these systems, or if you want to build a
282 generic distribution kernel, say Y here - otherwise say N.
283
284# This is an alphabetically sorted list of 64 bit extended platforms
285# Please maintain the alphabetic order if and when there are additions
286
287config X86_VSMP
288 bool "ScaleMP vSMP"
289 select PARAVIRT
290 depends on X86_64 && PCI
291 depends on X86_EXTENDED_PLATFORM
292 ---help---
293 Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is
294 supposed to run on these EM64T-based machines. Only choose this option
295 if you have one of these machines.
296
297config X86_UV
298 bool "SGI Ultraviolet"
299 depends on X86_64
300 depends on X86_EXTENDED_PLATFORM
301 ---help---
302 This option is needed in order to support SGI Ultraviolet systems.
303 If you don't have one of these, you should say N here.
304
305# Following is an alphabetically sorted list of 32 bit extended platforms
306# Please maintain the alphabetic order if and when there are additions
293 307
294config X86_ELAN 308config X86_ELAN
295 bool "AMD Elan" 309 bool "AMD Elan"
296 depends on X86_32 310 depends on X86_32
297 help 311 depends on X86_EXTENDED_PLATFORM
312 ---help---
298 Select this for an AMD Elan processor. 313 Select this for an AMD Elan processor.
299 314
300 Do not use this option for K6/Athlon/Opteron processors! 315 Do not use this option for K6/Athlon/Opteron processors!
301 316
302 If unsure, choose "PC-compatible" instead. 317 If unsure, choose "PC-compatible" instead.
303 318
304config X86_VOYAGER 319config X86_RDC321X
305 bool "Voyager (NCR)" 320 bool "RDC R-321x SoC"
306 depends on X86_32 && (SMP || BROKEN) && !PCI
307 help
308 Voyager is an MCA-based 32-way capable SMP architecture proprietary
309 to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based.
310
311 *** WARNING ***
312
313 If you do not specifically know you have a Voyager based machine,
314 say N here, otherwise the kernel you build will not be bootable.
315
316config X86_GENERICARCH
317 bool "Generic architecture"
318 depends on X86_32 321 depends on X86_32
319 help 322 depends on X86_EXTENDED_PLATFORM
320 This option compiles in the NUMAQ, Summit, bigsmp, ES7000, default 323 select M486
324 select X86_REBOOTFIXUPS
325 ---help---
326 This option is needed for RDC R-321x system-on-chip, also known
327 as R-8610-(G).
328 If you don't have one of these chips, you should say N here.
329
330config X86_32_NON_STANDARD
331 bool "Support non-standard 32-bit SMP architectures"
332 depends on X86_32 && SMP
333 depends on X86_EXTENDED_PLATFORM
334 ---help---
335 This option compiles in the NUMAQ, Summit, bigsmp, ES7000, default
321 subarchitectures. It is intended for a generic binary kernel. 336 subarchitectures. It is intended for a generic binary kernel.
322 if you select them all, kernel will probe it one by one. and will 337 if you select them all, kernel will probe it one by one. and will
323 fallback to default. 338 fallback to default.
324 339
325if X86_GENERICARCH 340# Alphabetically sorted list of Non standard 32 bit platforms
326 341
327config X86_NUMAQ 342config X86_NUMAQ
328 bool "NUMAQ (IBM/Sequent)" 343 bool "NUMAQ (IBM/Sequent)"
329 depends on SMP && X86_32 && PCI && X86_MPPARSE 344 depends on X86_32_NON_STANDARD
330 select NUMA 345 select NUMA
331 help 346 select X86_MPPARSE
347 ---help---
332 This option is used for getting Linux to run on a NUMAQ (IBM/Sequent) 348 This option is used for getting Linux to run on a NUMAQ (IBM/Sequent)
333 NUMA multiquad box. This changes the way that processors are 349 NUMA multiquad box. This changes the way that processors are
334 bootstrapped, and uses Clustered Logical APIC addressing mode instead 350 bootstrapped, and uses Clustered Logical APIC addressing mode instead
335 of Flat Logical. You will need a new lynxer.elf file to flash your 351 of Flat Logical. You will need a new lynxer.elf file to flash your
336 firmware with - send email to <Martin.Bligh@us.ibm.com>. 352 firmware with - send email to <Martin.Bligh@us.ibm.com>.
337 353
354config X86_VISWS
355 bool "SGI 320/540 (Visual Workstation)"
356 depends on X86_32 && PCI && X86_MPPARSE && PCI_GODIRECT
357 depends on X86_32_NON_STANDARD
358 ---help---
359 The SGI Visual Workstation series is an IA32-based workstation
360 based on SGI systems chips with some legacy PC hardware attached.
361
362 Say Y here to create a kernel to run on the SGI 320 or 540.
363
364 A kernel compiled for the Visual Workstation will run on general
365 PCs as well. See <file:Documentation/sgi-visws.txt> for details.
366
338config X86_SUMMIT 367config X86_SUMMIT
339 bool "Summit/EXA (IBM x440)" 368 bool "Summit/EXA (IBM x440)"
340 depends on X86_32 && SMP 369 depends on X86_32_NON_STANDARD
341 help 370 ---help---
342 This option is needed for IBM systems that use the Summit/EXA chipset. 371 This option is needed for IBM systems that use the Summit/EXA chipset.
343 In particular, it is needed for the x440. 372 In particular, it is needed for the x440.
344 373
345config X86_ES7000 374config X86_ES7000
346 bool "Support for Unisys ES7000 IA32 series" 375 bool "Unisys ES7000 IA32 series"
347 depends on X86_32 && SMP 376 depends on X86_32_NON_STANDARD && X86_BIGSMP
348 help 377 ---help---
349 Support for Unisys ES7000 systems. Say 'Y' here if this kernel is 378 Support for Unisys ES7000 systems. Say 'Y' here if this kernel is
350 supposed to run on an IA32-based Unisys ES7000 system. 379 supposed to run on an IA32-based Unisys ES7000 system.
351 380
352config X86_BIGSMP 381config X86_VOYAGER
353 bool "Support for big SMP systems with more than 8 CPUs" 382 bool "Voyager (NCR)"
354 depends on X86_32 && SMP 383 depends on SMP && !PCI && BROKEN
355 help 384 depends on X86_32_NON_STANDARD
356 This option is needed for the systems that have more than 8 CPUs 385 ---help---
357 and if the system is not of any sub-arch type above. 386 Voyager is an MCA-based 32-way capable SMP architecture proprietary
358 387 to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based.
359endif
360
361config X86_VSMP
362 bool "Support for ScaleMP vSMP"
363 select PARAVIRT
364 depends on X86_64 && PCI
365 help
366 Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is
367 supposed to run on these EM64T-based machines. Only choose this option
368 if you have one of these machines.
369
370endchoice
371
372config X86_VISWS
373 bool "SGI 320/540 (Visual Workstation)"
374 depends on X86_32 && PCI && !X86_VOYAGER && X86_MPPARSE && PCI_GODIRECT
375 help
376 The SGI Visual Workstation series is an IA32-based workstation
377 based on SGI systems chips with some legacy PC hardware attached.
378
379 Say Y here to create a kernel to run on the SGI 320 or 540.
380 388
381 A kernel compiled for the Visual Workstation will run on general 389 *** WARNING ***
382 PCs as well. See <file:Documentation/sgi-visws.txt> for details.
383 390
384config X86_RDC321X 391 If you do not specifically know you have a Voyager based machine,
385 bool "RDC R-321x SoC" 392 say N here, otherwise the kernel you build will not be bootable.
386 depends on X86_32
387 select M486
388 select X86_REBOOTFIXUPS
389 help
390 This option is needed for RDC R-321x system-on-chip, also known
391 as R-8610-(G).
392 If you don't have one of these chips, you should say N here.
393 393
394config SCHED_OMIT_FRAME_POINTER 394config SCHED_OMIT_FRAME_POINTER
395 def_bool y 395 def_bool y
396 prompt "Single-depth WCHAN output" 396 prompt "Single-depth WCHAN output"
397 depends on X86 397 depends on X86
398 help 398 ---help---
399 Calculate simpler /proc/<PID>/wchan values. If this option 399 Calculate simpler /proc/<PID>/wchan values. If this option
400 is disabled then wchan values will recurse back to the 400 is disabled then wchan values will recurse back to the
401 caller function. This provides more accurate wchan values, 401 caller function. This provides more accurate wchan values,
@@ -405,7 +405,7 @@ config SCHED_OMIT_FRAME_POINTER
405 405
406menuconfig PARAVIRT_GUEST 406menuconfig PARAVIRT_GUEST
407 bool "Paravirtualized guest support" 407 bool "Paravirtualized guest support"
408 help 408 ---help---
409 Say Y here to get to see options related to running Linux under 409 Say Y here to get to see options related to running Linux under
410 various hypervisors. This option alone does not add any kernel code. 410 various hypervisors. This option alone does not add any kernel code.
411 411
@@ -419,8 +419,7 @@ config VMI
419 bool "VMI Guest support" 419 bool "VMI Guest support"
420 select PARAVIRT 420 select PARAVIRT
421 depends on X86_32 421 depends on X86_32
422 depends on !X86_VOYAGER 422 ---help---
423 help
424 VMI provides a paravirtualized interface to the VMware ESX server 423 VMI provides a paravirtualized interface to the VMware ESX server
425 (it could be used by other hypervisors in theory too, but is not 424 (it could be used by other hypervisors in theory too, but is not
426 at the moment), by linking the kernel to a GPL-ed ROM module 425 at the moment), by linking the kernel to a GPL-ed ROM module
@@ -430,8 +429,7 @@ config KVM_CLOCK
430 bool "KVM paravirtualized clock" 429 bool "KVM paravirtualized clock"
431 select PARAVIRT 430 select PARAVIRT
432 select PARAVIRT_CLOCK 431 select PARAVIRT_CLOCK
433 depends on !X86_VOYAGER 432 ---help---
434 help
435 Turning on this option will allow you to run a paravirtualized clock 433 Turning on this option will allow you to run a paravirtualized clock
436 when running over the KVM hypervisor. Instead of relying on a PIT 434 when running over the KVM hypervisor. Instead of relying on a PIT
437 (or probably other) emulation by the underlying device model, the host 435 (or probably other) emulation by the underlying device model, the host
@@ -441,17 +439,15 @@ config KVM_CLOCK
441config KVM_GUEST 439config KVM_GUEST
442 bool "KVM Guest support" 440 bool "KVM Guest support"
443 select PARAVIRT 441 select PARAVIRT
444 depends on !X86_VOYAGER 442 ---help---
445 help 443 This option enables various optimizations for running under the KVM
446 This option enables various optimizations for running under the KVM 444 hypervisor.
447 hypervisor.
448 445
449source "arch/x86/lguest/Kconfig" 446source "arch/x86/lguest/Kconfig"
450 447
451config PARAVIRT 448config PARAVIRT
452 bool "Enable paravirtualization code" 449 bool "Enable paravirtualization code"
453 depends on !X86_VOYAGER 450 ---help---
454 help
455 This changes the kernel so it can modify itself when it is run 451 This changes the kernel so it can modify itself when it is run
456 under a hypervisor, potentially improving performance significantly 452 under a hypervisor, potentially improving performance significantly
457 over full virtualization. However, when run without a hypervisor 453 over full virtualization. However, when run without a hypervisor
@@ -464,51 +460,51 @@ config PARAVIRT_CLOCK
464endif 460endif
465 461
466config PARAVIRT_DEBUG 462config PARAVIRT_DEBUG
467 bool "paravirt-ops debugging" 463 bool "paravirt-ops debugging"
468 depends on PARAVIRT && DEBUG_KERNEL 464 depends on PARAVIRT && DEBUG_KERNEL
469 help 465 ---help---
470 Enable to debug paravirt_ops internals. Specifically, BUG if 466 Enable to debug paravirt_ops internals. Specifically, BUG if
471 a paravirt_op is missing when it is called. 467 a paravirt_op is missing when it is called.
472 468
473config MEMTEST 469config MEMTEST
474 bool "Memtest" 470 bool "Memtest"
475 help 471 ---help---
476 This option adds a kernel parameter 'memtest', which allows memtest 472 This option adds a kernel parameter 'memtest', which allows memtest
477 to be set. 473 to be set.
478 memtest=0, mean disabled; -- default 474 memtest=0, mean disabled; -- default
479 memtest=1, mean do 1 test pattern; 475 memtest=1, mean do 1 test pattern;
480 ... 476 ...
481 memtest=4, mean do 4 test patterns. 477 memtest=4, mean do 4 test patterns.
482 If you are unsure how to answer this question, answer N. 478 If you are unsure how to answer this question, answer N.
483 479
484config X86_SUMMIT_NUMA 480config X86_SUMMIT_NUMA
485 def_bool y 481 def_bool y
486 depends on X86_32 && NUMA && X86_GENERICARCH 482 depends on X86_32 && NUMA && X86_32_NON_STANDARD
487 483
488config X86_CYCLONE_TIMER 484config X86_CYCLONE_TIMER
489 def_bool y 485 def_bool y
490 depends on X86_GENERICARCH 486 depends on X86_32_NON_STANDARD
491 487
492source "arch/x86/Kconfig.cpu" 488source "arch/x86/Kconfig.cpu"
493 489
494config HPET_TIMER 490config HPET_TIMER
495 def_bool X86_64 491 def_bool X86_64
496 prompt "HPET Timer Support" if X86_32 492 prompt "HPET Timer Support" if X86_32
497 help 493 ---help---
498 Use the IA-PC HPET (High Precision Event Timer) to manage 494 Use the IA-PC HPET (High Precision Event Timer) to manage
499 time in preference to the PIT and RTC, if a HPET is 495 time in preference to the PIT and RTC, if a HPET is
500 present. 496 present.
501 HPET is the next generation timer replacing legacy 8254s. 497 HPET is the next generation timer replacing legacy 8254s.
502 The HPET provides a stable time base on SMP 498 The HPET provides a stable time base on SMP
503 systems, unlike the TSC, but it is more expensive to access, 499 systems, unlike the TSC, but it is more expensive to access,
504 as it is off-chip. You can find the HPET spec at 500 as it is off-chip. You can find the HPET spec at
505 <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>. 501 <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
506 502
507 You can safely choose Y here. However, HPET will only be 503 You can safely choose Y here. However, HPET will only be
508 activated if the platform and the BIOS support this feature. 504 activated if the platform and the BIOS support this feature.
509 Otherwise the 8254 will be used for timing services. 505 Otherwise the 8254 will be used for timing services.
510 506
511 Choose N to continue using the legacy 8254 timer. 507 Choose N to continue using the legacy 8254 timer.
512 508
513config HPET_EMULATE_RTC 509config HPET_EMULATE_RTC
514 def_bool y 510 def_bool y
@@ -519,7 +515,7 @@ config HPET_EMULATE_RTC
519config DMI 515config DMI
520 default y 516 default y
521 bool "Enable DMI scanning" if EMBEDDED 517 bool "Enable DMI scanning" if EMBEDDED
522 help 518 ---help---
523 Enabled scanning of DMI to identify machine quirks. Say Y 519 Enabled scanning of DMI to identify machine quirks. Say Y
524 here unless you have verified that your setup is not 520 here unless you have verified that your setup is not
525 affected by entries in the DMI blacklist. Required by PNP 521 affected by entries in the DMI blacklist. Required by PNP
@@ -531,7 +527,7 @@ config GART_IOMMU
531 select SWIOTLB 527 select SWIOTLB
532 select AGP 528 select AGP
533 depends on X86_64 && PCI 529 depends on X86_64 && PCI
534 help 530 ---help---
535 Support for full DMA access of devices with 32bit memory access only 531 Support for full DMA access of devices with 32bit memory access only
536 on systems with more than 3GB. This is usually needed for USB, 532 on systems with more than 3GB. This is usually needed for USB,
537 sound, many IDE/SATA chipsets and some other devices. 533 sound, many IDE/SATA chipsets and some other devices.
@@ -546,7 +542,7 @@ config CALGARY_IOMMU
546 bool "IBM Calgary IOMMU support" 542 bool "IBM Calgary IOMMU support"
547 select SWIOTLB 543 select SWIOTLB
548 depends on X86_64 && PCI && EXPERIMENTAL 544 depends on X86_64 && PCI && EXPERIMENTAL
549 help 545 ---help---
550 Support for hardware IOMMUs in IBM's xSeries x366 and x460 546 Support for hardware IOMMUs in IBM's xSeries x366 and x460
551 systems. Needed to run systems with more than 3GB of memory 547 systems. Needed to run systems with more than 3GB of memory
552 properly with 32-bit PCI devices that do not support DAC 548 properly with 32-bit PCI devices that do not support DAC
@@ -564,7 +560,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
564 def_bool y 560 def_bool y
565 prompt "Should Calgary be enabled by default?" 561 prompt "Should Calgary be enabled by default?"
566 depends on CALGARY_IOMMU 562 depends on CALGARY_IOMMU
567 help 563 ---help---
568 Should Calgary be enabled by default? if you choose 'y', Calgary 564 Should Calgary be enabled by default? if you choose 'y', Calgary
569 will be used (if it exists). If you choose 'n', Calgary will not be 565 will be used (if it exists). If you choose 'n', Calgary will not be
570 used even if it exists. If you choose 'n' and would like to use 566 used even if it exists. If you choose 'n' and would like to use
@@ -576,7 +572,7 @@ config AMD_IOMMU
576 select SWIOTLB 572 select SWIOTLB
577 select PCI_MSI 573 select PCI_MSI
578 depends on X86_64 && PCI && ACPI 574 depends on X86_64 && PCI && ACPI
579 help 575 ---help---
580 With this option you can enable support for AMD IOMMU hardware in 576 With this option you can enable support for AMD IOMMU hardware in
581 your system. An IOMMU is a hardware component which provides 577 your system. An IOMMU is a hardware component which provides
582 remapping of DMA memory accesses from devices. With an AMD IOMMU you 578 remapping of DMA memory accesses from devices. With an AMD IOMMU you
@@ -591,7 +587,7 @@ config AMD_IOMMU_STATS
591 bool "Export AMD IOMMU statistics to debugfs" 587 bool "Export AMD IOMMU statistics to debugfs"
592 depends on AMD_IOMMU 588 depends on AMD_IOMMU
593 select DEBUG_FS 589 select DEBUG_FS
594 help 590 ---help---
595 This option enables code in the AMD IOMMU driver to collect various 591 This option enables code in the AMD IOMMU driver to collect various
596 statistics about whats happening in the driver and exports that 592 statistics about whats happening in the driver and exports that
597 information to userspace via debugfs. 593 information to userspace via debugfs.
@@ -600,7 +596,7 @@ config AMD_IOMMU_STATS
600# need this always selected by IOMMU for the VIA workaround 596# need this always selected by IOMMU for the VIA workaround
601config SWIOTLB 597config SWIOTLB
602 def_bool y if X86_64 598 def_bool y if X86_64
603 help 599 ---help---
604 Support for software bounce buffers used on x86-64 systems 600 Support for software bounce buffers used on x86-64 systems
605 which don't have a hardware IOMMU (e.g. the current generation 601 which don't have a hardware IOMMU (e.g. the current generation
606 of Intel's x86-64 CPUs). Using this PCI devices which can only 602 of Intel's x86-64 CPUs). Using this PCI devices which can only
@@ -618,7 +614,7 @@ config MAXSMP
618 depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL 614 depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
619 select CPUMASK_OFFSTACK 615 select CPUMASK_OFFSTACK
620 default n 616 default n
621 help 617 ---help---
622 Configure maximum number of CPUS and NUMA Nodes for this architecture. 618 Configure maximum number of CPUS and NUMA Nodes for this architecture.
623 If unsure, say N. 619 If unsure, say N.
624 620
@@ -629,7 +625,7 @@ config NR_CPUS
629 default "4096" if MAXSMP 625 default "4096" if MAXSMP
630 default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000) 626 default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000)
631 default "8" if SMP 627 default "8" if SMP
632 help 628 ---help---
633 This allows you to specify the maximum number of CPUs which this 629 This allows you to specify the maximum number of CPUs which this
634 kernel will support. The maximum supported value is 512 and the 630 kernel will support. The maximum supported value is 512 and the
635 minimum value which makes sense is 2. 631 minimum value which makes sense is 2.
@@ -640,7 +636,7 @@ config NR_CPUS
640config SCHED_SMT 636config SCHED_SMT
641 bool "SMT (Hyperthreading) scheduler support" 637 bool "SMT (Hyperthreading) scheduler support"
642 depends on X86_HT 638 depends on X86_HT
643 help 639 ---help---
644 SMT scheduler support improves the CPU scheduler's decision making 640 SMT scheduler support improves the CPU scheduler's decision making
645 when dealing with Intel Pentium 4 chips with HyperThreading at a 641 when dealing with Intel Pentium 4 chips with HyperThreading at a
646 cost of slightly increased overhead in some places. If unsure say 642 cost of slightly increased overhead in some places. If unsure say
@@ -650,7 +646,7 @@ config SCHED_MC
650 def_bool y 646 def_bool y
651 prompt "Multi-core scheduler support" 647 prompt "Multi-core scheduler support"
652 depends on X86_HT 648 depends on X86_HT
653 help 649 ---help---
654 Multi-core scheduler support improves the CPU scheduler's decision 650 Multi-core scheduler support improves the CPU scheduler's decision
655 making when dealing with multi-core CPU chips at a cost of slightly 651 making when dealing with multi-core CPU chips at a cost of slightly
656 increased overhead in some places. If unsure say N here. 652 increased overhead in some places. If unsure say N here.
@@ -659,8 +655,8 @@ source "kernel/Kconfig.preempt"
659 655
660config X86_UP_APIC 656config X86_UP_APIC
661 bool "Local APIC support on uniprocessors" 657 bool "Local APIC support on uniprocessors"
662 depends on X86_32 && !SMP && !(X86_VOYAGER || X86_GENERICARCH) 658 depends on X86_32 && !SMP && !X86_32_NON_STANDARD
663 help 659 ---help---
664 A local APIC (Advanced Programmable Interrupt Controller) is an 660 A local APIC (Advanced Programmable Interrupt Controller) is an
665 integrated interrupt controller in the CPU. If you have a single-CPU 661 integrated interrupt controller in the CPU. If you have a single-CPU
666 system which has a processor with a local APIC, you can say Y here to 662 system which has a processor with a local APIC, you can say Y here to
@@ -673,7 +669,7 @@ config X86_UP_APIC
673config X86_UP_IOAPIC 669config X86_UP_IOAPIC
674 bool "IO-APIC support on uniprocessors" 670 bool "IO-APIC support on uniprocessors"
675 depends on X86_UP_APIC 671 depends on X86_UP_APIC
676 help 672 ---help---
677 An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an 673 An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an
678 SMP-capable replacement for PC-style interrupt controllers. Most 674 SMP-capable replacement for PC-style interrupt controllers. Most
679 SMP systems and many recent uniprocessor systems have one. 675 SMP systems and many recent uniprocessor systems have one.
@@ -684,11 +680,11 @@ config X86_UP_IOAPIC
684 680
685config X86_LOCAL_APIC 681config X86_LOCAL_APIC
686 def_bool y 682 def_bool y
687 depends on X86_64 || (X86_32 && (X86_UP_APIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH)) 683 depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
688 684
689config X86_IO_APIC 685config X86_IO_APIC
690 def_bool y 686 def_bool y
691 depends on X86_64 || (X86_32 && (X86_UP_IOAPIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH)) 687 depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
692 688
693config X86_VISWS_APIC 689config X86_VISWS_APIC
694 def_bool y 690 def_bool y
@@ -698,7 +694,7 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
698 bool "Reroute for broken boot IRQs" 694 bool "Reroute for broken boot IRQs"
699 default n 695 default n
700 depends on X86_IO_APIC 696 depends on X86_IO_APIC
701 help 697 ---help---
702 This option enables a workaround that fixes a source of 698 This option enables a workaround that fixes a source of
703 spurious interrupts. This is recommended when threaded 699 spurious interrupts. This is recommended when threaded
704 interrupt handling is used on systems where the generation of 700 interrupt handling is used on systems where the generation of
@@ -720,7 +716,6 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
720 716
721config X86_MCE 717config X86_MCE
722 bool "Machine Check Exception" 718 bool "Machine Check Exception"
723 depends on !X86_VOYAGER
724 ---help--- 719 ---help---
725 Machine Check Exception support allows the processor to notify the 720 Machine Check Exception support allows the processor to notify the
726 kernel if it detects a problem (e.g. overheating, component failure). 721 kernel if it detects a problem (e.g. overheating, component failure).
@@ -739,7 +734,7 @@ config X86_MCE_INTEL
739 def_bool y 734 def_bool y
740 prompt "Intel MCE features" 735 prompt "Intel MCE features"
741 depends on X86_64 && X86_MCE && X86_LOCAL_APIC 736 depends on X86_64 && X86_MCE && X86_LOCAL_APIC
742 help 737 ---help---
743 Additional support for intel specific MCE features such as 738 Additional support for intel specific MCE features such as
744 the thermal monitor. 739 the thermal monitor.
745 740
@@ -747,14 +742,14 @@ config X86_MCE_AMD
747 def_bool y 742 def_bool y
748 prompt "AMD MCE features" 743 prompt "AMD MCE features"
749 depends on X86_64 && X86_MCE && X86_LOCAL_APIC 744 depends on X86_64 && X86_MCE && X86_LOCAL_APIC
750 help 745 ---help---
751 Additional support for AMD specific MCE features such as 746 Additional support for AMD specific MCE features such as
752 the DRAM Error Threshold. 747 the DRAM Error Threshold.
753 748
754config X86_MCE_NONFATAL 749config X86_MCE_NONFATAL
755 tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4" 750 tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
756 depends on X86_32 && X86_MCE 751 depends on X86_32 && X86_MCE
757 help 752 ---help---
758 Enabling this feature starts a timer that triggers every 5 seconds which 753 Enabling this feature starts a timer that triggers every 5 seconds which
759 will look at the machine check registers to see if anything happened. 754 will look at the machine check registers to see if anything happened.
760 Non-fatal problems automatically get corrected (but still logged). 755 Non-fatal problems automatically get corrected (but still logged).
@@ -767,7 +762,7 @@ config X86_MCE_NONFATAL
767config X86_MCE_P4THERMAL 762config X86_MCE_P4THERMAL
768 bool "check for P4 thermal throttling interrupt." 763 bool "check for P4 thermal throttling interrupt."
769 depends on X86_32 && X86_MCE && (X86_UP_APIC || SMP) 764 depends on X86_32 && X86_MCE && (X86_UP_APIC || SMP)
770 help 765 ---help---
771 Enabling this feature will cause a message to be printed when the P4 766 Enabling this feature will cause a message to be printed when the P4
772 enters thermal throttling. 767 enters thermal throttling.
773 768
@@ -775,11 +770,11 @@ config VM86
775 bool "Enable VM86 support" if EMBEDDED 770 bool "Enable VM86 support" if EMBEDDED
776 default y 771 default y
777 depends on X86_32 772 depends on X86_32
778 help 773 ---help---
779 This option is required by programs like DOSEMU to run 16-bit legacy 774 This option is required by programs like DOSEMU to run 16-bit legacy
780 code on X86 processors. It also may be needed by software like 775 code on X86 processors. It also may be needed by software like
781 XFree86 to initialize some video cards via BIOS. Disabling this 776 XFree86 to initialize some video cards via BIOS. Disabling this
782 option saves about 6k. 777 option saves about 6k.
783 778
784config TOSHIBA 779config TOSHIBA
785 tristate "Toshiba Laptop support" 780 tristate "Toshiba Laptop support"
@@ -853,33 +848,33 @@ config MICROCODE
853 module will be called microcode. 848 module will be called microcode.
854 849
855config MICROCODE_INTEL 850config MICROCODE_INTEL
856 bool "Intel microcode patch loading support" 851 bool "Intel microcode patch loading support"
857 depends on MICROCODE 852 depends on MICROCODE
858 default MICROCODE 853 default MICROCODE
859 select FW_LOADER 854 select FW_LOADER
860 --help--- 855 ---help---
861 This options enables microcode patch loading support for Intel 856 This options enables microcode patch loading support for Intel
862 processors. 857 processors.
863 858
864 For latest news and information on obtaining all the required 859 For latest news and information on obtaining all the required
865 Intel ingredients for this driver, check: 860 Intel ingredients for this driver, check:
866 <http://www.urbanmyth.org/microcode/>. 861 <http://www.urbanmyth.org/microcode/>.
867 862
868config MICROCODE_AMD 863config MICROCODE_AMD
869 bool "AMD microcode patch loading support" 864 bool "AMD microcode patch loading support"
870 depends on MICROCODE 865 depends on MICROCODE
871 select FW_LOADER 866 select FW_LOADER
872 --help--- 867 ---help---
873 If you select this option, microcode patch loading support for AMD 868 If you select this option, microcode patch loading support for AMD
874 processors will be enabled. 869 processors will be enabled.
875 870
876 config MICROCODE_OLD_INTERFACE 871config MICROCODE_OLD_INTERFACE
877 def_bool y 872 def_bool y
878 depends on MICROCODE 873 depends on MICROCODE
879 874
880config X86_MSR 875config X86_MSR
881 tristate "/dev/cpu/*/msr - Model-specific register support" 876 tristate "/dev/cpu/*/msr - Model-specific register support"
882 help 877 ---help---
883 This device gives privileged processes access to the x86 878 This device gives privileged processes access to the x86
884 Model-Specific Registers (MSRs). It is a character device with 879 Model-Specific Registers (MSRs). It is a character device with
885 major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr. 880 major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
@@ -888,7 +883,7 @@ config X86_MSR
888 883
889config X86_CPUID 884config X86_CPUID
890 tristate "/dev/cpu/*/cpuid - CPU information support" 885 tristate "/dev/cpu/*/cpuid - CPU information support"
891 help 886 ---help---
892 This device gives processes access to the x86 CPUID instruction to 887 This device gives processes access to the x86 CPUID instruction to
893 be executed on a specific processor. It is a character device 888 be executed on a specific processor. It is a character device
894 with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to 889 with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
@@ -940,7 +935,7 @@ config NOHIGHMEM
940config HIGHMEM4G 935config HIGHMEM4G
941 bool "4GB" 936 bool "4GB"
942 depends on !X86_NUMAQ 937 depends on !X86_NUMAQ
943 help 938 ---help---
944 Select this if you have a 32-bit processor and between 1 and 4 939 Select this if you have a 32-bit processor and between 1 and 4
945 gigabytes of physical RAM. 940 gigabytes of physical RAM.
946 941
@@ -948,7 +943,7 @@ config HIGHMEM64G
948 bool "64GB" 943 bool "64GB"
949 depends on !M386 && !M486 944 depends on !M386 && !M486
950 select X86_PAE 945 select X86_PAE
951 help 946 ---help---
952 Select this if you have a 32-bit processor and more than 4 947 Select this if you have a 32-bit processor and more than 4
953 gigabytes of physical RAM. 948 gigabytes of physical RAM.
954 949
@@ -959,7 +954,7 @@ choice
959 prompt "Memory split" if EMBEDDED 954 prompt "Memory split" if EMBEDDED
960 default VMSPLIT_3G 955 default VMSPLIT_3G
961 depends on X86_32 956 depends on X86_32
962 help 957 ---help---
963 Select the desired split between kernel and user memory. 958 Select the desired split between kernel and user memory.
964 959
965 If the address range available to the kernel is less than the 960 If the address range available to the kernel is less than the
@@ -1005,20 +1000,20 @@ config HIGHMEM
1005config X86_PAE 1000config X86_PAE
1006 bool "PAE (Physical Address Extension) Support" 1001 bool "PAE (Physical Address Extension) Support"
1007 depends on X86_32 && !HIGHMEM4G 1002 depends on X86_32 && !HIGHMEM4G
1008 help 1003 ---help---
1009 PAE is required for NX support, and furthermore enables 1004 PAE is required for NX support, and furthermore enables
1010 larger swapspace support for non-overcommit purposes. It 1005 larger swapspace support for non-overcommit purposes. It
1011 has the cost of more pagetable lookup overhead, and also 1006 has the cost of more pagetable lookup overhead, and also
1012 consumes more pagetable space per process. 1007 consumes more pagetable space per process.
1013 1008
1014config ARCH_PHYS_ADDR_T_64BIT 1009config ARCH_PHYS_ADDR_T_64BIT
1015 def_bool X86_64 || X86_PAE 1010 def_bool X86_64 || X86_PAE
1016 1011
1017config DIRECT_GBPAGES 1012config DIRECT_GBPAGES
1018 bool "Enable 1GB pages for kernel pagetables" if EMBEDDED 1013 bool "Enable 1GB pages for kernel pagetables" if EMBEDDED
1019 default y 1014 default y
1020 depends on X86_64 1015 depends on X86_64
1021 help 1016 ---help---
1022 Allow the kernel linear mapping to use 1GB pages on CPUs that 1017 Allow the kernel linear mapping to use 1GB pages on CPUs that
1023 support it. This can improve the kernel's performance a tiny bit by 1018 support it. This can improve the kernel's performance a tiny bit by
1024 reducing TLB pressure. If in doubt, say "Y". 1019 reducing TLB pressure. If in doubt, say "Y".
@@ -1028,9 +1023,8 @@ config NUMA
1028 bool "Numa Memory Allocation and Scheduler Support" 1023 bool "Numa Memory Allocation and Scheduler Support"
1029 depends on SMP 1024 depends on SMP
1030 depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL) 1025 depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
1031 default n if X86_PC
1032 default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP) 1026 default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
1033 help 1027 ---help---
1034 Enable NUMA (Non Uniform Memory Access) support. 1028 Enable NUMA (Non Uniform Memory Access) support.
1035 1029
1036 The kernel will try to allocate memory used by a CPU on the 1030 The kernel will try to allocate memory used by a CPU on the
@@ -1053,19 +1047,19 @@ config K8_NUMA
1053 def_bool y 1047 def_bool y
1054 prompt "Old style AMD Opteron NUMA detection" 1048 prompt "Old style AMD Opteron NUMA detection"
1055 depends on X86_64 && NUMA && PCI 1049 depends on X86_64 && NUMA && PCI
1056 help 1050 ---help---
1057 Enable K8 NUMA node topology detection. You should say Y here if 1051 Enable K8 NUMA node topology detection. You should say Y here if
1058 you have a multi processor AMD K8 system. This uses an old 1052 you have a multi processor AMD K8 system. This uses an old
1059 method to read the NUMA configuration directly from the builtin 1053 method to read the NUMA configuration directly from the builtin
1060 Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA 1054 Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
1061 instead, which also takes priority if both are compiled in. 1055 instead, which also takes priority if both are compiled in.
1062 1056
1063config X86_64_ACPI_NUMA 1057config X86_64_ACPI_NUMA
1064 def_bool y 1058 def_bool y
1065 prompt "ACPI NUMA detection" 1059 prompt "ACPI NUMA detection"
1066 depends on X86_64 && NUMA && ACPI && PCI 1060 depends on X86_64 && NUMA && ACPI && PCI
1067 select ACPI_NUMA 1061 select ACPI_NUMA
1068 help 1062 ---help---
1069 Enable ACPI SRAT based node topology detection. 1063 Enable ACPI SRAT based node topology detection.
1070 1064
1071# Some NUMA nodes have memory ranges that span 1065# Some NUMA nodes have memory ranges that span
@@ -1080,7 +1074,7 @@ config NODES_SPAN_OTHER_NODES
1080config NUMA_EMU 1074config NUMA_EMU
1081 bool "NUMA emulation" 1075 bool "NUMA emulation"
1082 depends on X86_64 && NUMA 1076 depends on X86_64 && NUMA
1083 help 1077 ---help---
1084 Enable NUMA emulation. A flat machine will be split 1078 Enable NUMA emulation. A flat machine will be split
1085 into virtual nodes when booted with "numa=fake=N", where N is the 1079 into virtual nodes when booted with "numa=fake=N", where N is the
1086 number of nodes. This is only useful for debugging. 1080 number of nodes. This is only useful for debugging.
@@ -1093,7 +1087,7 @@ config NODES_SHIFT
1093 default "4" if X86_NUMAQ 1087 default "4" if X86_NUMAQ
1094 default "3" 1088 default "3"
1095 depends on NEED_MULTIPLE_NODES 1089 depends on NEED_MULTIPLE_NODES
1096 help 1090 ---help---
1097 Specify the maximum number of NUMA Nodes available on the target 1091 Specify the maximum number of NUMA Nodes available on the target
1098 system. Increases memory reserved to accomodate various tables. 1092 system. Increases memory reserved to accomodate various tables.
1099 1093
@@ -1131,7 +1125,7 @@ config ARCH_SPARSEMEM_DEFAULT
1131 1125
1132config ARCH_SPARSEMEM_ENABLE 1126config ARCH_SPARSEMEM_ENABLE
1133 def_bool y 1127 def_bool y
1134 depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) || X86_GENERICARCH 1128 depends on X86_64 || NUMA || (EXPERIMENTAL && X86_32) || X86_32_NON_STANDARD
1135 select SPARSEMEM_STATIC if X86_32 1129 select SPARSEMEM_STATIC if X86_32
1136 select SPARSEMEM_VMEMMAP_ENABLE if X86_64 1130 select SPARSEMEM_VMEMMAP_ENABLE if X86_64
1137 1131
@@ -1148,61 +1142,61 @@ source "mm/Kconfig"
1148config HIGHPTE 1142config HIGHPTE
1149 bool "Allocate 3rd-level pagetables from highmem" 1143 bool "Allocate 3rd-level pagetables from highmem"
1150 depends on X86_32 && (HIGHMEM4G || HIGHMEM64G) 1144 depends on X86_32 && (HIGHMEM4G || HIGHMEM64G)
1151 help 1145 ---help---
1152 The VM uses one page table entry for each page of physical memory. 1146 The VM uses one page table entry for each page of physical memory.
1153 For systems with a lot of RAM, this can be wasteful of precious 1147 For systems with a lot of RAM, this can be wasteful of precious
1154 low memory. Setting this option will put user-space page table 1148 low memory. Setting this option will put user-space page table
1155 entries in high memory. 1149 entries in high memory.
1156 1150
1157config X86_CHECK_BIOS_CORRUPTION 1151config X86_CHECK_BIOS_CORRUPTION
1158 bool "Check for low memory corruption" 1152 bool "Check for low memory corruption"
1159 help 1153 ---help---
1160 Periodically check for memory corruption in low memory, which 1154 Periodically check for memory corruption in low memory, which
1161 is suspected to be caused by BIOS. Even when enabled in the 1155 is suspected to be caused by BIOS. Even when enabled in the
1162 configuration, it is disabled at runtime. Enable it by 1156 configuration, it is disabled at runtime. Enable it by
1163 setting "memory_corruption_check=1" on the kernel command 1157 setting "memory_corruption_check=1" on the kernel command
1164 line. By default it scans the low 64k of memory every 60 1158 line. By default it scans the low 64k of memory every 60
1165 seconds; see the memory_corruption_check_size and 1159 seconds; see the memory_corruption_check_size and
1166 memory_corruption_check_period parameters in 1160 memory_corruption_check_period parameters in
1167 Documentation/kernel-parameters.txt to adjust this. 1161 Documentation/kernel-parameters.txt to adjust this.
1168 1162
1169 When enabled with the default parameters, this option has 1163 When enabled with the default parameters, this option has
1170 almost no overhead, as it reserves a relatively small amount 1164 almost no overhead, as it reserves a relatively small amount
1171 of memory and scans it infrequently. It both detects corruption 1165 of memory and scans it infrequently. It both detects corruption
1172 and prevents it from affecting the running system. 1166 and prevents it from affecting the running system.
1173 1167
1174 It is, however, intended as a diagnostic tool; if repeatable 1168 It is, however, intended as a diagnostic tool; if repeatable
1175 BIOS-originated corruption always affects the same memory, 1169 BIOS-originated corruption always affects the same memory,
1176 you can use memmap= to prevent the kernel from using that 1170 you can use memmap= to prevent the kernel from using that
1177 memory. 1171 memory.
1178 1172
1179config X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK 1173config X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
1180 bool "Set the default setting of memory_corruption_check" 1174 bool "Set the default setting of memory_corruption_check"
1181 depends on X86_CHECK_BIOS_CORRUPTION 1175 depends on X86_CHECK_BIOS_CORRUPTION
1182 default y 1176 default y
1183 help 1177 ---help---
1184 Set whether the default state of memory_corruption_check is 1178 Set whether the default state of memory_corruption_check is
1185 on or off. 1179 on or off.
1186 1180
1187config X86_RESERVE_LOW_64K 1181config X86_RESERVE_LOW_64K
1188 bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen" 1182 bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen"
1189 default y 1183 default y
1190 help 1184 ---help---
1191 Reserve the first 64K of physical RAM on BIOSes that are known 1185 Reserve the first 64K of physical RAM on BIOSes that are known
1192 to potentially corrupt that memory range. A numbers of BIOSes are 1186 to potentially corrupt that memory range. A numbers of BIOSes are
1193 known to utilize this area during suspend/resume, so it must not 1187 known to utilize this area during suspend/resume, so it must not
1194 be used by the kernel. 1188 be used by the kernel.
1195 1189
1196 Set this to N if you are absolutely sure that you trust the BIOS 1190 Set this to N if you are absolutely sure that you trust the BIOS
1197 to get all its memory reservations and usages right. 1191 to get all its memory reservations and usages right.
1198 1192
1199 If you have doubts about the BIOS (e.g. suspend/resume does not 1193 If you have doubts about the BIOS (e.g. suspend/resume does not
1200 work or there's kernel crashes after certain hardware hotplug 1194 work or there's kernel crashes after certain hardware hotplug
1201 events) and it's not AMI or Phoenix, then you might want to enable 1195 events) and it's not AMI or Phoenix, then you might want to enable
1202 X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical 1196 X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical
1203 corruption patterns. 1197 corruption patterns.
1204 1198
1205 Say Y if unsure. 1199 Say Y if unsure.
1206 1200
1207config MATH_EMULATION 1201config MATH_EMULATION
1208 bool 1202 bool
@@ -1268,7 +1262,7 @@ config MTRR_SANITIZER
1268 def_bool y 1262 def_bool y
1269 prompt "MTRR cleanup support" 1263 prompt "MTRR cleanup support"
1270 depends on MTRR 1264 depends on MTRR
1271 help 1265 ---help---
1272 Convert MTRR layout from continuous to discrete, so X drivers can 1266 Convert MTRR layout from continuous to discrete, so X drivers can
1273 add writeback entries. 1267 add writeback entries.
1274 1268
@@ -1283,7 +1277,7 @@ config MTRR_SANITIZER_ENABLE_DEFAULT
1283 range 0 1 1277 range 0 1
1284 default "0" 1278 default "0"
1285 depends on MTRR_SANITIZER 1279 depends on MTRR_SANITIZER
1286 help 1280 ---help---
1287 Enable mtrr cleanup default value 1281 Enable mtrr cleanup default value
1288 1282
1289config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT 1283config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
@@ -1291,7 +1285,7 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
1291 range 0 7 1285 range 0 7
1292 default "1" 1286 default "1"
1293 depends on MTRR_SANITIZER 1287 depends on MTRR_SANITIZER
1294 help 1288 ---help---
1295 mtrr cleanup spare entries default, it can be changed via 1289 mtrr cleanup spare entries default, it can be changed via
1296 mtrr_spare_reg_nr=N on the kernel command line. 1290 mtrr_spare_reg_nr=N on the kernel command line.
1297 1291
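   As the help text notes, the spare-entry count can also be overridden at boot; an
   illustrative command-line fragment (the value 2 is arbitrary, within the 0-7 range
   above):
       mtrr_spare_reg_nr=2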
@@ -1299,7 +1293,7 @@ config X86_PAT
1299 bool 1293 bool
1300 prompt "x86 PAT support" 1294 prompt "x86 PAT support"
1301 depends on MTRR 1295 depends on MTRR
1302 help 1296 ---help---
1303 Use PAT attributes to set up page-level cache control. 1297 Use PAT attributes to set up page-level cache control.
1304 1298
1305 PATs are the modern equivalents of MTRRs and are much more 1299 PATs are the modern equivalents of MTRRs and are much more
@@ -1314,20 +1308,20 @@ config EFI
1314 bool "EFI runtime service support" 1308 bool "EFI runtime service support"
1315 depends on ACPI 1309 depends on ACPI
1316 ---help--- 1310 ---help---
1317 This enables the kernel to use EFI runtime services that are 1311 This enables the kernel to use EFI runtime services that are
1318 available (such as the EFI variable services). 1312 available (such as the EFI variable services).
1319 1313
1320 This option is only useful on systems that have EFI firmware. 1314 This option is only useful on systems that have EFI firmware.
1321 In addition, you should use the latest ELILO loader available 1315 In addition, you should use the latest ELILO loader available
1322 at <http://elilo.sourceforge.net> in order to take advantage 1316 at <http://elilo.sourceforge.net> in order to take advantage
1323 of EFI runtime services. However, even with this option, the 1317 of EFI runtime services. However, even with this option, the
1324 resultant kernel should continue to boot on existing non-EFI 1318 resultant kernel should continue to boot on existing non-EFI
1325 platforms. 1319 platforms.
1326 1320
1327config SECCOMP 1321config SECCOMP
1328 def_bool y 1322 def_bool y
1329 prompt "Enable seccomp to safely compute untrusted bytecode" 1323 prompt "Enable seccomp to safely compute untrusted bytecode"
1330 help 1324 ---help---
1331 This kernel feature is useful for number crunching applications 1325 This kernel feature is useful for number crunching applications
1332 that may need to compute untrusted bytecode during their 1326 that may need to compute untrusted bytecode during their
1333 execution. By using pipes or other transports made available to 1327 execution. By using pipes or other transports made available to
@@ -1340,13 +1334,17 @@ config SECCOMP
1340 1334
1341 If unsure, say Y. Only embedded should say N here. 1335 If unsure, say Y. Only embedded should say N here.
1342 1336
1337config CC_STACKPROTECTOR_ALL
1338 bool
1339
1343config CC_STACKPROTECTOR 1340config CC_STACKPROTECTOR
1344 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" 1341 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
1345 depends on X86_64 && EXPERIMENTAL && BROKEN 1342 depends on X86_64
1346 help 1343 select CC_STACKPROTECTOR_ALL
1347 This option turns on the -fstack-protector GCC feature. This 1344 ---help---
1348 feature puts, at the beginning of critical functions, a canary 1345 This option turns on the -fstack-protector GCC feature. This
1349 value on the stack just before the return address, and validates 1346 feature puts, at the beginning of functions, a canary value on
1347 the stack just before the return address, and validates
1350 the value just before actually returning. Stack based buffer 1348 the value just before actually returning. Stack based buffer
1351 overflows (that need to overwrite this return address) now also 1349 overflows (that need to overwrite this return address) now also
1352 overwrite the canary, which gets detected and the attack is then 1350 overwrite the canary, which gets detected and the attack is then
@@ -1354,22 +1352,14 @@ config CC_STACKPROTECTOR
1354 1352
1355 This feature requires gcc version 4.2 or above, or a distribution 1353 This feature requires gcc version 4.2 or above, or a distribution
1356 gcc with the feature backported. Older versions are automatically 1354 gcc with the feature backported. Older versions are automatically
1357 detected and for those versions, this configuration option is ignored. 1355 detected and for those versions, this configuration option is
1358 1356 ignored. (and a warning is printed during bootup)
1359config CC_STACKPROTECTOR_ALL
1360 bool "Use stack-protector for all functions"
1361 depends on CC_STACKPROTECTOR
1362 help
1363 Normally, GCC only inserts the canary value protection for
1364 functions that use large-ish on-stack buffers. By enabling
1365 this option, GCC will be asked to do this for ALL functions.
1366 1357
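   Net effect of the hunk above: selecting the stack protector on 64-bit now pulls in
   the all-functions variant automatically, so a sketch of the resulting configuration
   (assuming the build-time compiler check succeeds) is simply:
       CONFIG_CC_STACKPROTECTOR=y
       CONFIG_CC_STACKPROTECTOR_ALL=y
   with the second symbol set by the new 'select', not by hand.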
1367source kernel/Kconfig.hz 1358source kernel/Kconfig.hz
1368 1359
1369config KEXEC 1360config KEXEC
1370 bool "kexec system call" 1361 bool "kexec system call"
1371 depends on X86_BIOS_REBOOT 1362 ---help---
1372 help
1373 kexec is a system call that implements the ability to shutdown your 1363 kexec is a system call that implements the ability to shutdown your
1374 current kernel, and to start another kernel. It is like a reboot 1364 current kernel, and to start another kernel. It is like a reboot
1375 but it is independent of the system firmware. And like a reboot 1365 but it is independent of the system firmware. And like a reboot
@@ -1386,7 +1376,7 @@ config KEXEC
1386config CRASH_DUMP 1376config CRASH_DUMP
1387 bool "kernel crash dumps" 1377 bool "kernel crash dumps"
1388 depends on X86_64 || (X86_32 && HIGHMEM) 1378 depends on X86_64 || (X86_32 && HIGHMEM)
1389 help 1379 ---help---
1390 Generate crash dump after being started by kexec. 1380 Generate crash dump after being started by kexec.
1391 This should be normally only set in special crash dump kernels 1381 This should be normally only set in special crash dump kernels
1392 which are loaded in the main kernel with kexec-tools into 1382 which are loaded in the main kernel with kexec-tools into
@@ -1401,7 +1391,7 @@ config KEXEC_JUMP
1401 bool "kexec jump (EXPERIMENTAL)" 1391 bool "kexec jump (EXPERIMENTAL)"
1402 depends on EXPERIMENTAL 1392 depends on EXPERIMENTAL
1403 depends on KEXEC && HIBERNATION && X86_32 1393 depends on KEXEC && HIBERNATION && X86_32
1404 help 1394 ---help---
1405 Jump between original kernel and kexeced kernel and invoke 1395 Jump between original kernel and kexeced kernel and invoke
1406 code in physical address mode via KEXEC 1396 code in physical address mode via KEXEC
1407 1397
@@ -1410,7 +1400,7 @@ config PHYSICAL_START
1410 default "0x1000000" if X86_NUMAQ 1400 default "0x1000000" if X86_NUMAQ
1411 default "0x200000" if X86_64 1401 default "0x200000" if X86_64
1412 default "0x100000" 1402 default "0x100000"
1413 help 1403 ---help---
1414 This gives the physical address where the kernel is loaded. 1404 This gives the physical address where the kernel is loaded.
1415 1405
1416 If the kernel is not relocatable (CONFIG_RELOCATABLE=n) then 1406 If the kernel is not relocatable (CONFIG_RELOCATABLE=n) then
@@ -1451,7 +1441,7 @@ config PHYSICAL_START
1451config RELOCATABLE 1441config RELOCATABLE
1452 bool "Build a relocatable kernel (EXPERIMENTAL)" 1442 bool "Build a relocatable kernel (EXPERIMENTAL)"
1453 depends on EXPERIMENTAL 1443 depends on EXPERIMENTAL
1454 help 1444 ---help---
1455 This builds a kernel image that retains relocation information 1445 This builds a kernel image that retains relocation information
1456 so it can be loaded someplace besides the default 1MB. 1446 so it can be loaded someplace besides the default 1MB.
1457 The relocations tend to make the kernel binary about 10% larger, 1447 The relocations tend to make the kernel binary about 10% larger,
@@ -1471,7 +1461,7 @@ config PHYSICAL_ALIGN
1471 default "0x100000" if X86_32 1461 default "0x100000" if X86_32
1472 default "0x200000" if X86_64 1462 default "0x200000" if X86_64
1473 range 0x2000 0x400000 1463 range 0x2000 0x400000
1474 help 1464 ---help---
1475 This value puts the alignment restrictions on physical address 1465 This value puts the alignment restrictions on physical address
1476 where kernel is loaded and run from. Kernel is compiled for an 1466 where kernel is loaded and run from. Kernel is compiled for an
1477 address which meets above alignment restriction. 1467 address which meets above alignment restriction.
@@ -1492,7 +1482,7 @@ config PHYSICAL_ALIGN
1492 1482
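   Taken together, the three options above control where the kernel image may land in
   physical memory. An illustrative 64-bit fragment using the defaults quoted in the
   hunks (not a recommendation):
       CONFIG_RELOCATABLE=y
       CONFIG_PHYSICAL_START=0x200000
       CONFIG_PHYSICAL_ALIGN=0x200000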
1493config HOTPLUG_CPU 1483config HOTPLUG_CPU
1494 bool "Support for hot-pluggable CPUs" 1484 bool "Support for hot-pluggable CPUs"
1495 depends on SMP && HOTPLUG && !X86_VOYAGER 1485 depends on SMP && HOTPLUG
1496 ---help--- 1486 ---help---
1497 Say Y here to allow turning CPUs off and on. CPUs can be 1487 Say Y here to allow turning CPUs off and on. CPUs can be
1498 controlled through /sys/devices/system/cpu. 1488 controlled through /sys/devices/system/cpu.
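   The sysfs interface named above is the usual way to exercise CPU hotplug once this
   option is enabled; for example (cpu1 is an arbitrary choice):
       echo 0 > /sys/devices/system/cpu/cpu1/online
       echo 1 > /sys/devices/system/cpu/cpu1/online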
@@ -1504,7 +1494,7 @@ config COMPAT_VDSO
1504 def_bool y 1494 def_bool y
1505 prompt "Compat VDSO support" 1495 prompt "Compat VDSO support"
1506 depends on X86_32 || IA32_EMULATION 1496 depends on X86_32 || IA32_EMULATION
1507 help 1497 ---help---
1508 Map the 32-bit VDSO to the predictable old-style address too. 1498 Map the 32-bit VDSO to the predictable old-style address too.
1509 ---help--- 1499 ---help---
1510 Say N here if you are running a sufficiently recent glibc 1500 Say N here if you are running a sufficiently recent glibc
@@ -1516,7 +1506,7 @@ config COMPAT_VDSO
1516config CMDLINE_BOOL 1506config CMDLINE_BOOL
1517 bool "Built-in kernel command line" 1507 bool "Built-in kernel command line"
1518 default n 1508 default n
1519 help 1509 ---help---
1520 Allow for specifying boot arguments to the kernel at 1510 Allow for specifying boot arguments to the kernel at
1521 build time. On some systems (e.g. embedded ones), it is 1511 build time. On some systems (e.g. embedded ones), it is
1522 necessary or convenient to provide some or all of the 1512 necessary or convenient to provide some or all of the
@@ -1534,7 +1524,7 @@ config CMDLINE
1534 string "Built-in kernel command string" 1524 string "Built-in kernel command string"
1535 depends on CMDLINE_BOOL 1525 depends on CMDLINE_BOOL
1536 default "" 1526 default ""
1537 help 1527 ---help---
1538 Enter arguments here that should be compiled into the kernel 1528 Enter arguments here that should be compiled into the kernel
1539 image and used at boot time. If the boot loader provides a 1529 image and used at boot time. If the boot loader provides a
1540 command line at boot time, it is appended to this string to 1530 command line at boot time, it is appended to this string to
@@ -1551,7 +1541,7 @@ config CMDLINE_OVERRIDE
1551 bool "Built-in command line overrides boot loader arguments" 1541 bool "Built-in command line overrides boot loader arguments"
1552 default n 1542 default n
1553 depends on CMDLINE_BOOL 1543 depends on CMDLINE_BOOL
1554 help 1544 ---help---
1555 Set this option to 'Y' to have the kernel ignore the boot loader 1545 Set this option to 'Y' to have the kernel ignore the boot loader
1556 command line, and use ONLY the built-in command line. 1546 command line, and use ONLY the built-in command line.
1557 1547
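   A hypothetical built-in command line following the three options above (the console
   and root values are placeholders, not defaults from this patch):
       CONFIG_CMDLINE_BOOL=y
       CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/sda1"
       # CONFIG_CMDLINE_OVERRIDE is not set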
@@ -1573,7 +1563,6 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
1573 depends on NUMA 1563 depends on NUMA
1574 1564
1575menu "Power management and ACPI options" 1565menu "Power management and ACPI options"
1576 depends on !X86_VOYAGER
1577 1566
1578config ARCH_HIBERNATION_HEADER 1567config ARCH_HIBERNATION_HEADER
1579 def_bool y 1568 def_bool y
@@ -1651,7 +1640,7 @@ if APM
1651 1640
1652config APM_IGNORE_USER_SUSPEND 1641config APM_IGNORE_USER_SUSPEND
1653 bool "Ignore USER SUSPEND" 1642 bool "Ignore USER SUSPEND"
1654 help 1643 ---help---
1655 This option will ignore USER SUSPEND requests. On machines with a 1644 This option will ignore USER SUSPEND requests. On machines with a
1656 compliant APM BIOS, you want to say N. However, on the NEC Versa M 1645 compliant APM BIOS, you want to say N. However, on the NEC Versa M
1657 series notebooks, it is necessary to say Y because of a BIOS bug. 1646 series notebooks, it is necessary to say Y because of a BIOS bug.
@@ -1675,7 +1664,7 @@ config APM_DO_ENABLE
1675 1664
1676config APM_CPU_IDLE 1665config APM_CPU_IDLE
1677 bool "Make CPU Idle calls when idle" 1666 bool "Make CPU Idle calls when idle"
1678 help 1667 ---help---
1679 Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop. 1668 Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop.
1680 On some machines, this can activate improved power savings, such as 1669 On some machines, this can activate improved power savings, such as
1681 a slowed CPU clock rate, when the machine is idle. These idle calls 1670 a slowed CPU clock rate, when the machine is idle. These idle calls
@@ -1686,7 +1675,7 @@ config APM_CPU_IDLE
1686 1675
1687config APM_DISPLAY_BLANK 1676config APM_DISPLAY_BLANK
1688 bool "Enable console blanking using APM" 1677 bool "Enable console blanking using APM"
1689 help 1678 ---help---
1690 Enable console blanking using the APM. Some laptops can use this to 1679 Enable console blanking using the APM. Some laptops can use this to
1691 turn off the LCD backlight when the screen blanker of the Linux 1680 turn off the LCD backlight when the screen blanker of the Linux
1692 virtual console blanks the screen. Note that this is only used by 1681 virtual console blanks the screen. Note that this is only used by
@@ -1699,7 +1688,7 @@ config APM_DISPLAY_BLANK
1699 1688
1700config APM_ALLOW_INTS 1689config APM_ALLOW_INTS
1701 bool "Allow interrupts during APM BIOS calls" 1690 bool "Allow interrupts during APM BIOS calls"
1702 help 1691 ---help---
1703 Normally we disable external interrupts while we are making calls to 1692 Normally we disable external interrupts while we are making calls to
1704 the APM BIOS as a measure to lessen the effects of a badly behaving 1693 the APM BIOS as a measure to lessen the effects of a badly behaving
1705 BIOS implementation. The BIOS should reenable interrupts if it 1694 BIOS implementation. The BIOS should reenable interrupts if it
@@ -1724,7 +1713,7 @@ config PCI
1724 bool "PCI support" 1713 bool "PCI support"
1725 default y 1714 default y
1726 select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC) 1715 select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
1727 help 1716 ---help---
1728 Find out whether you have a PCI motherboard. PCI is the name of a 1717 Find out whether you have a PCI motherboard. PCI is the name of a
1729 bus system, i.e. the way the CPU talks to the other stuff inside 1718 bus system, i.e. the way the CPU talks to the other stuff inside
1730 your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or 1719 your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
@@ -1795,7 +1784,7 @@ config PCI_MMCONFIG
1795config DMAR 1784config DMAR
1796 bool "Support for DMA Remapping Devices (EXPERIMENTAL)" 1785 bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
1797 depends on X86_64 && PCI_MSI && ACPI && EXPERIMENTAL 1786 depends on X86_64 && PCI_MSI && ACPI && EXPERIMENTAL
1798 help 1787 ---help---
1799 DMA remapping (DMAR) devices support enables independent address 1788 DMA remapping (DMAR) devices support enables independent address
1800 translations for Direct Memory Access (DMA) from devices. 1789 translations for Direct Memory Access (DMA) from devices.
1801 These DMA remapping devices are reported via ACPI tables 1790 These DMA remapping devices are reported via ACPI tables
@@ -1817,29 +1806,29 @@ config DMAR_GFX_WA
1817 def_bool y 1806 def_bool y
1818 prompt "Support for Graphics workaround" 1807 prompt "Support for Graphics workaround"
1819 depends on DMAR 1808 depends on DMAR
1820 help 1809 ---help---
1821 Current graphics drivers tend to use physical addresses 1810 Current graphics drivers tend to use physical addresses
1822 for DMA and avoid using DMA APIs. Setting this config 1811 for DMA and avoid using DMA APIs. Setting this config
1823 option permits the IOMMU driver to set a unity map for 1812 option permits the IOMMU driver to set a unity map for
1824 all the OS-visible memory. Hence the driver can continue 1813 all the OS-visible memory. Hence the driver can continue
1825 to use physical addresses for DMA. 1814 to use physical addresses for DMA.
1826 1815
1827config DMAR_FLOPPY_WA 1816config DMAR_FLOPPY_WA
1828 def_bool y 1817 def_bool y
1829 depends on DMAR 1818 depends on DMAR
1830 help 1819 ---help---
1831 Floppy disk drivers are known to bypass DMA API calls 1820 Floppy disk drivers are known to bypass DMA API calls
1832 thereby failing to work when IOMMU is enabled. This 1821 thereby failing to work when IOMMU is enabled. This
1833 workaround will set up a 1:1 mapping for the first 1822 workaround will set up a 1:1 mapping for the first
1834 16M to make floppy (an ISA device) work. 1823 16M to make floppy (an ISA device) work.
1835 1824
1836config INTR_REMAP 1825config INTR_REMAP
1837 bool "Support for Interrupt Remapping (EXPERIMENTAL)" 1826 bool "Support for Interrupt Remapping (EXPERIMENTAL)"
1838 depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL 1827 depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
1839 help 1828 ---help---
1840 Supports Interrupt remapping for IO-APIC and MSI devices. 1829 Supports Interrupt remapping for IO-APIC and MSI devices.
1841 To use x2apic mode in the CPUs which support x2APIC enhancements or 1830 To use x2apic mode in the CPUs which support x2APIC enhancements or
1842 to support platforms with CPUs having > 8 bit APIC IDs, say Y. 1831 to support platforms with CPUs having > 8 bit APIC IDs, say Y.
1843 1832
1844source "drivers/pci/pcie/Kconfig" 1833source "drivers/pci/pcie/Kconfig"
1845 1834
@@ -1853,8 +1842,7 @@ if X86_32
1853 1842
1854config ISA 1843config ISA
1855 bool "ISA support" 1844 bool "ISA support"
1856 depends on !X86_VOYAGER 1845 ---help---
1857 help
1858 Find out whether you have ISA slots on your motherboard. ISA is the 1846 Find out whether you have ISA slots on your motherboard. ISA is the
1859 name of a bus system, i.e. the way the CPU talks to the other stuff 1847 name of a bus system, i.e. the way the CPU talks to the other stuff
1860 inside your box. Other bus systems are PCI, EISA, MicroChannel 1848 inside your box. Other bus systems are PCI, EISA, MicroChannel
@@ -1880,9 +1868,8 @@ config EISA
1880source "drivers/eisa/Kconfig" 1868source "drivers/eisa/Kconfig"
1881 1869
1882config MCA 1870config MCA
1883 bool "MCA support" if !X86_VOYAGER 1871 bool "MCA support"
1884 default y if X86_VOYAGER 1872 ---help---
1885 help
1886 MicroChannel Architecture is found in some IBM PS/2 machines and 1873 MicroChannel Architecture is found in some IBM PS/2 machines and
1887 laptops. It is a bus system similar to PCI or ISA. See 1874 laptops. It is a bus system similar to PCI or ISA. See
1888 <file:Documentation/mca.txt> (and especially the web page given 1875 <file:Documentation/mca.txt> (and especially the web page given
@@ -1892,8 +1879,7 @@ source "drivers/mca/Kconfig"
1892 1879
1893config SCx200 1880config SCx200
1894 tristate "NatSemi SCx200 support" 1881 tristate "NatSemi SCx200 support"
1895 depends on !X86_VOYAGER 1882 ---help---
1896 help
1897 This provides basic support for National Semiconductor's 1883 This provides basic support for National Semiconductor's
1898 (now AMD's) Geode processors. The driver probes for the 1884 (now AMD's) Geode processors. The driver probes for the
1899 PCI-IDs of several on-chip devices, so it's a good dependency 1885 PCI-IDs of several on-chip devices, so it's a good dependency
@@ -1905,7 +1891,7 @@ config SCx200HR_TIMER
1905 tristate "NatSemi SCx200 27MHz High-Resolution Timer Support" 1891 tristate "NatSemi SCx200 27MHz High-Resolution Timer Support"
1906 depends on SCx200 && GENERIC_TIME 1892 depends on SCx200 && GENERIC_TIME
1907 default y 1893 default y
1908 help 1894 ---help---
1909 This driver provides a clocksource built upon the on-chip 1895 This driver provides a clocksource built upon the on-chip
1910 27MHz high-resolution timer. It's also a workaround for 1896 27MHz high-resolution timer. It's also a workaround for
1911 NSC Geode SC-1100's buggy TSC, which loses time when the 1897 NSC Geode SC-1100's buggy TSC, which loses time when the
@@ -1916,7 +1902,7 @@ config GEODE_MFGPT_TIMER
1916 def_bool y 1902 def_bool y
1917 prompt "Geode Multi-Function General Purpose Timer (MFGPT) events" 1903 prompt "Geode Multi-Function General Purpose Timer (MFGPT) events"
1918 depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS 1904 depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS
1919 help 1905 ---help---
1920 This driver provides a clock event source based on the MFGPT 1906 This driver provides a clock event source based on the MFGPT
1921 timer(s) in the CS5535 and CS5536 companion chip for the geode. 1907 timer(s) in the CS5535 and CS5536 companion chip for the geode.
1922 MFGPTs have a better resolution and max interval than the 1908 MFGPTs have a better resolution and max interval than the
@@ -1925,7 +1911,7 @@ config GEODE_MFGPT_TIMER
1925config OLPC 1911config OLPC
1926 bool "One Laptop Per Child support" 1912 bool "One Laptop Per Child support"
1927 default n 1913 default n
1928 help 1914 ---help---
1929 Add support for detecting the unique features of the OLPC 1915 Add support for detecting the unique features of the OLPC
1930 XO hardware. 1916 XO hardware.
1931 1917
@@ -1950,16 +1936,16 @@ config IA32_EMULATION
1950 bool "IA32 Emulation" 1936 bool "IA32 Emulation"
1951 depends on X86_64 1937 depends on X86_64
1952 select COMPAT_BINFMT_ELF 1938 select COMPAT_BINFMT_ELF
1953 help 1939 ---help---
1954 Include code to run 32-bit programs under a 64-bit kernel. You should 1940 Include code to run 32-bit programs under a 64-bit kernel. You should
1955 likely turn this on, unless you're 100% sure that you don't have any 1941 likely turn this on, unless you're 100% sure that you don't have any
1956 32-bit programs left. 1942 32-bit programs left.
1957 1943
1958config IA32_AOUT 1944config IA32_AOUT
1959 tristate "IA32 a.out support" 1945 tristate "IA32 a.out support"
1960 depends on IA32_EMULATION 1946 depends on IA32_EMULATION
1961 help 1947 ---help---
1962 Support old a.out binaries in the 32bit emulation. 1948 Support old a.out binaries in the 32bit emulation.
1963 1949
1964config COMPAT 1950config COMPAT
1965 def_bool y 1951 def_bool y
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index c98d52e82966..a95eaf0e582a 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -50,7 +50,7 @@ config M386
50config M486 50config M486
51 bool "486" 51 bool "486"
52 depends on X86_32 52 depends on X86_32
53 help 53 ---help---
54 Select this for a 486 series processor, either Intel or one of the 54 Select this for a 486 series processor, either Intel or one of the
55 compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX, 55 compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX,
56 DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or 56 DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
@@ -59,7 +59,7 @@ config M486
59config M586 59config M586
60 bool "586/K5/5x86/6x86/6x86MX" 60 bool "586/K5/5x86/6x86/6x86MX"
61 depends on X86_32 61 depends on X86_32
62 help 62 ---help---
63 Select this for an 586 or 686 series processor such as the AMD K5, 63 Select this for an 586 or 686 series processor such as the AMD K5,
64 the Cyrix 5x86, 6x86 and 6x86MX. This choice does not 64 the Cyrix 5x86, 6x86 and 6x86MX. This choice does not
65 assume the RDTSC (Read Time Stamp Counter) instruction. 65 assume the RDTSC (Read Time Stamp Counter) instruction.
@@ -67,21 +67,21 @@ config M586
67config M586TSC 67config M586TSC
68 bool "Pentium-Classic" 68 bool "Pentium-Classic"
69 depends on X86_32 69 depends on X86_32
70 help 70 ---help---
71 Select this for a Pentium Classic processor with the RDTSC (Read 71 Select this for a Pentium Classic processor with the RDTSC (Read
72 Time Stamp Counter) instruction for benchmarking. 72 Time Stamp Counter) instruction for benchmarking.
73 73
74config M586MMX 74config M586MMX
75 bool "Pentium-MMX" 75 bool "Pentium-MMX"
76 depends on X86_32 76 depends on X86_32
77 help 77 ---help---
78 Select this for a Pentium with the MMX graphics/multimedia 78 Select this for a Pentium with the MMX graphics/multimedia
79 extended instructions. 79 extended instructions.
80 80
81config M686 81config M686
82 bool "Pentium-Pro" 82 bool "Pentium-Pro"
83 depends on X86_32 83 depends on X86_32
84 help 84 ---help---
85 Select this for Intel Pentium Pro chips. This enables the use of 85 Select this for Intel Pentium Pro chips. This enables the use of
86 Pentium Pro extended instructions, and disables the init-time guard 86 Pentium Pro extended instructions, and disables the init-time guard
87 against the f00f bug found in earlier Pentiums. 87 against the f00f bug found in earlier Pentiums.
@@ -89,7 +89,7 @@ config M686
89config MPENTIUMII 89config MPENTIUMII
90 bool "Pentium-II/Celeron(pre-Coppermine)" 90 bool "Pentium-II/Celeron(pre-Coppermine)"
91 depends on X86_32 91 depends on X86_32
92 help 92 ---help---
93 Select this for Intel chips based on the Pentium-II and 93 Select this for Intel chips based on the Pentium-II and
94 pre-Coppermine Celeron core. This option enables an unaligned 94 pre-Coppermine Celeron core. This option enables an unaligned
95 copy optimization, compiles the kernel with optimization flags 95 copy optimization, compiles the kernel with optimization flags
@@ -99,7 +99,7 @@ config MPENTIUMII
99config MPENTIUMIII 99config MPENTIUMIII
100 bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon" 100 bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
101 depends on X86_32 101 depends on X86_32
102 help 102 ---help---
103 Select this for Intel chips based on the Pentium-III and 103 Select this for Intel chips based on the Pentium-III and
104 Celeron-Coppermine core. This option enables use of some 104 Celeron-Coppermine core. This option enables use of some
105 extended prefetch instructions in addition to the Pentium II 105 extended prefetch instructions in addition to the Pentium II
@@ -108,14 +108,14 @@ config MPENTIUMIII
108config MPENTIUMM 108config MPENTIUMM
109 bool "Pentium M" 109 bool "Pentium M"
110 depends on X86_32 110 depends on X86_32
111 help 111 ---help---
112 Select this for Intel Pentium M (not Pentium-4 M) 112 Select this for Intel Pentium M (not Pentium-4 M)
113 notebook chips. 113 notebook chips.
114 114
115config MPENTIUM4 115config MPENTIUM4
116 bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon" 116 bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
117 depends on X86_32 117 depends on X86_32
118 help 118 ---help---
119 Select this for Intel Pentium 4 chips. This includes the 119 Select this for Intel Pentium 4 chips. This includes the
120 Pentium 4, Pentium D, P4-based Celeron and Xeon, and 120 Pentium 4, Pentium D, P4-based Celeron and Xeon, and
121 Pentium-4 M (not Pentium M) chips. This option enables compile 121 Pentium-4 M (not Pentium M) chips. This option enables compile
@@ -151,7 +151,7 @@ config MPENTIUM4
151config MK6 151config MK6
152 bool "K6/K6-II/K6-III" 152 bool "K6/K6-II/K6-III"
153 depends on X86_32 153 depends on X86_32
154 help 154 ---help---
155 Select this for an AMD K6-family processor. Enables use of 155 Select this for an AMD K6-family processor. Enables use of
156 some extended instructions, and passes appropriate optimization 156 some extended instructions, and passes appropriate optimization
157 flags to GCC. 157 flags to GCC.
@@ -159,14 +159,14 @@ config MK6
159config MK7 159config MK7
160 bool "Athlon/Duron/K7" 160 bool "Athlon/Duron/K7"
161 depends on X86_32 161 depends on X86_32
162 help 162 ---help---
163 Select this for an AMD Athlon K7-family processor. Enables use of 163 Select this for an AMD Athlon K7-family processor. Enables use of
164 some extended instructions, and passes appropriate optimization 164 some extended instructions, and passes appropriate optimization
165 flags to GCC. 165 flags to GCC.
166 166
167config MK8 167config MK8
168 bool "Opteron/Athlon64/Hammer/K8" 168 bool "Opteron/Athlon64/Hammer/K8"
169 help 169 ---help---
170 Select this for an AMD Opteron or Athlon64 Hammer-family processor. 170 Select this for an AMD Opteron or Athlon64 Hammer-family processor.
171 Enables use of some extended instructions, and passes appropriate 171 Enables use of some extended instructions, and passes appropriate
172 optimization flags to GCC. 172 optimization flags to GCC.
@@ -174,7 +174,7 @@ config MK8
174config MCRUSOE 174config MCRUSOE
175 bool "Crusoe" 175 bool "Crusoe"
176 depends on X86_32 176 depends on X86_32
177 help 177 ---help---
178 Select this for a Transmeta Crusoe processor. Treats the processor 178 Select this for a Transmeta Crusoe processor. Treats the processor
179 like a 586 with TSC, and sets some GCC optimization flags (like a 179 like a 586 with TSC, and sets some GCC optimization flags (like a
180 Pentium Pro with no alignment requirements). 180 Pentium Pro with no alignment requirements).
@@ -182,13 +182,13 @@ config MCRUSOE
182config MEFFICEON 182config MEFFICEON
183 bool "Efficeon" 183 bool "Efficeon"
184 depends on X86_32 184 depends on X86_32
185 help 185 ---help---
186 Select this for a Transmeta Efficeon processor. 186 Select this for a Transmeta Efficeon processor.
187 187
188config MWINCHIPC6 188config MWINCHIPC6
189 bool "Winchip-C6" 189 bool "Winchip-C6"
190 depends on X86_32 190 depends on X86_32
191 help 191 ---help---
192 Select this for an IDT Winchip C6 chip. Linux and GCC 192 Select this for an IDT Winchip C6 chip. Linux and GCC
193 treat this chip as a 586TSC with some extended instructions 193 treat this chip as a 586TSC with some extended instructions
194 and alignment requirements. 194 and alignment requirements.
@@ -196,7 +196,7 @@ config MWINCHIPC6
196config MWINCHIP3D 196config MWINCHIP3D
197 bool "Winchip-2/Winchip-2A/Winchip-3" 197 bool "Winchip-2/Winchip-2A/Winchip-3"
198 depends on X86_32 198 depends on X86_32
199 help 199 ---help---
200 Select this for an IDT Winchip-2, 2A or 3. Linux and GCC 200 Select this for an IDT Winchip-2, 2A or 3. Linux and GCC
201 treat this chip as a 586TSC with some extended instructions 201 treat this chip as a 586TSC with some extended instructions
202 and alignment requirements. Also enable out of order memory 202 and alignment requirements. Also enable out of order memory
@@ -206,19 +206,19 @@ config MWINCHIP3D
206config MGEODEGX1 206config MGEODEGX1
207 bool "GeodeGX1" 207 bool "GeodeGX1"
208 depends on X86_32 208 depends on X86_32
209 help 209 ---help---
210 Select this for a Geode GX1 (Cyrix MediaGX) chip. 210 Select this for a Geode GX1 (Cyrix MediaGX) chip.
211 211
212config MGEODE_LX 212config MGEODE_LX
213 bool "Geode GX/LX" 213 bool "Geode GX/LX"
214 depends on X86_32 214 depends on X86_32
215 help 215 ---help---
216 Select this for AMD Geode GX and LX processors. 216 Select this for AMD Geode GX and LX processors.
217 217
218config MCYRIXIII 218config MCYRIXIII
219 bool "CyrixIII/VIA-C3" 219 bool "CyrixIII/VIA-C3"
220 depends on X86_32 220 depends on X86_32
221 help 221 ---help---
222 Select this for a Cyrix III or C3 chip. Presently Linux and GCC 222 Select this for a Cyrix III or C3 chip. Presently Linux and GCC
223 treat this chip as a generic 586. Whilst the CPU is 686 class, 223 treat this chip as a generic 586. Whilst the CPU is 686 class,
224 it lacks the cmov extension which gcc assumes is present when 224 it lacks the cmov extension which gcc assumes is present when
@@ -230,7 +230,7 @@ config MCYRIXIII
230config MVIAC3_2 230config MVIAC3_2
231 bool "VIA C3-2 (Nehemiah)" 231 bool "VIA C3-2 (Nehemiah)"
232 depends on X86_32 232 depends on X86_32
233 help 233 ---help---
234 Select this for a VIA C3 "Nehemiah". Selecting this enables usage 234 Select this for a VIA C3 "Nehemiah". Selecting this enables usage
235 of SSE and tells gcc to treat the CPU as a 686. 235 of SSE and tells gcc to treat the CPU as a 686.
236 Note, this kernel will not boot on older (pre model 9) C3s. 236 Note, this kernel will not boot on older (pre model 9) C3s.
@@ -238,14 +238,14 @@ config MVIAC3_2
238config MVIAC7 238config MVIAC7
239 bool "VIA C7" 239 bool "VIA C7"
240 depends on X86_32 240 depends on X86_32
241 help 241 ---help---
242 Select this for a VIA C7. Selecting this uses the correct cache 242 Select this for a VIA C7. Selecting this uses the correct cache
243 shift and tells gcc to treat the CPU as a 686. 243 shift and tells gcc to treat the CPU as a 686.
244 244
245config MPSC 245config MPSC
246 bool "Intel P4 / older Netburst based Xeon" 246 bool "Intel P4 / older Netburst based Xeon"
247 depends on X86_64 247 depends on X86_64
248 help 248 ---help---
249 Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey 249 Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
250 Xeon CPUs with Intel 64bit which is compatible with x86-64. 250 Xeon CPUs with Intel 64bit which is compatible with x86-64.
251 Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the 251 Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the
@@ -255,7 +255,7 @@ config MPSC
255 255
256config MCORE2 256config MCORE2
257 bool "Core 2/newer Xeon" 257 bool "Core 2/newer Xeon"
258 help 258 ---help---
259 259
260 Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and 260 Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
261 53xx) CPUs. You can distinguish newer from older Xeons by the CPU 261 53xx) CPUs. You can distinguish newer from older Xeons by the CPU
@@ -265,7 +265,7 @@ config MCORE2
265config GENERIC_CPU 265config GENERIC_CPU
266 bool "Generic-x86-64" 266 bool "Generic-x86-64"
267 depends on X86_64 267 depends on X86_64
268 help 268 ---help---
269 Generic x86-64 CPU. 269 Generic x86-64 CPU.
270 Run equally well on all x86-64 CPUs. 270 Run equally well on all x86-64 CPUs.
271 271
@@ -274,7 +274,7 @@ endchoice
274config X86_GENERIC 274config X86_GENERIC
275 bool "Generic x86 support" 275 bool "Generic x86 support"
276 depends on X86_32 276 depends on X86_32
277 help 277 ---help---
278 Instead of just including optimizations for the selected 278 Instead of just including optimizations for the selected
279 x86 variant (e.g. PII, Crusoe or Athlon), include some more 279 x86 variant (e.g. PII, Crusoe or Athlon), include some more
280 generic optimizations as well. This will make the kernel 280 generic optimizations as well. This will make the kernel
@@ -294,25 +294,23 @@ config X86_CPU
294# Define implied options from the CPU selection here 294# Define implied options from the CPU selection here
295config X86_L1_CACHE_BYTES 295config X86_L1_CACHE_BYTES
296 int 296 int
297 default "128" if GENERIC_CPU || MPSC 297 default "128" if MPSC
298 default "64" if MK8 || MCORE2 298 default "64" if GENERIC_CPU || MK8 || MCORE2 || X86_32
299 depends on X86_64
300 299
301config X86_INTERNODE_CACHE_BYTES 300config X86_INTERNODE_CACHE_BYTES
302 int 301 int
303 default "4096" if X86_VSMP 302 default "4096" if X86_VSMP
304 default X86_L1_CACHE_BYTES if !X86_VSMP 303 default X86_L1_CACHE_BYTES if !X86_VSMP
305 depends on X86_64
306 304
307config X86_CMPXCHG 305config X86_CMPXCHG
308 def_bool X86_64 || (X86_32 && !M386) 306 def_bool X86_64 || (X86_32 && !M386)
309 307
310config X86_L1_CACHE_SHIFT 308config X86_L1_CACHE_SHIFT
311 int 309 int
312 default "7" if MPENTIUM4 || X86_GENERIC || GENERIC_CPU || MPSC 310 default "7" if MPENTIUM4 || MPSC
313 default "4" if X86_ELAN || M486 || M386 || MGEODEGX1 311 default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
314 default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX 312 default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
315 default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 313 default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 || X86_GENERIC || GENERIC_CPU
316 314
317config X86_XADD 315config X86_XADD
318 def_bool y 316 def_bool y
@@ -321,7 +319,7 @@ config X86_XADD
321config X86_PPRO_FENCE 319config X86_PPRO_FENCE
322 bool "PentiumPro memory ordering errata workaround" 320 bool "PentiumPro memory ordering errata workaround"
323 depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1 321 depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
324 help 322 ---help---
325 Old PentiumPro multiprocessor systems had errata that could cause 323 Old PentiumPro multiprocessor systems had errata that could cause
326 memory operations to violate the x86 ordering standard in rare cases. 324 memory operations to violate the x86 ordering standard in rare cases.
327 Enabling this option will attempt to work around some (but not all) 325 Enabling this option will attempt to work around some (but not all)
@@ -414,14 +412,14 @@ config X86_DEBUGCTLMSR
414 412
415menuconfig PROCESSOR_SELECT 413menuconfig PROCESSOR_SELECT
416 bool "Supported processor vendors" if EMBEDDED 414 bool "Supported processor vendors" if EMBEDDED
417 help 415 ---help---
418 This lets you choose what x86 vendor support code your kernel 416 This lets you choose what x86 vendor support code your kernel
419 will include. 417 will include.
420 418
421config CPU_SUP_INTEL 419config CPU_SUP_INTEL
422 default y 420 default y
423 bool "Support Intel processors" if PROCESSOR_SELECT 421 bool "Support Intel processors" if PROCESSOR_SELECT
424 help 422 ---help---
425 This enables detection, tunings and quirks for Intel processors 423 This enables detection, tunings and quirks for Intel processors
426 424
427 You need this enabled if you want your kernel to run on an 425 You need this enabled if you want your kernel to run on an
@@ -435,7 +433,7 @@ config CPU_SUP_CYRIX_32
435 default y 433 default y
436 bool "Support Cyrix processors" if PROCESSOR_SELECT 434 bool "Support Cyrix processors" if PROCESSOR_SELECT
437 depends on !64BIT 435 depends on !64BIT
438 help 436 ---help---
439 This enables detection, tunings and quirks for Cyrix processors 437 This enables detection, tunings and quirks for Cyrix processors
440 438
441 You need this enabled if you want your kernel to run on a 439 You need this enabled if you want your kernel to run on a
@@ -448,7 +446,7 @@ config CPU_SUP_CYRIX_32
448config CPU_SUP_AMD 446config CPU_SUP_AMD
449 default y 447 default y
450 bool "Support AMD processors" if PROCESSOR_SELECT 448 bool "Support AMD processors" if PROCESSOR_SELECT
451 help 449 ---help---
452 This enables detection, tunings and quirks for AMD processors 450 This enables detection, tunings and quirks for AMD processors
453 451
454 You need this enabled if you want your kernel to run on an 452 You need this enabled if you want your kernel to run on an
@@ -462,7 +460,7 @@ config CPU_SUP_CENTAUR_32
462 default y 460 default y
463 bool "Support Centaur processors" if PROCESSOR_SELECT 461 bool "Support Centaur processors" if PROCESSOR_SELECT
464 depends on !64BIT 462 depends on !64BIT
465 help 463 ---help---
466 This enables detection, tunings and quirks for Centaur processors 464 This enables detection, tunings and quirks for Centaur processors
467 465
468 You need this enabled if you want your kernel to run on a 466 You need this enabled if you want your kernel to run on a
@@ -476,7 +474,7 @@ config CPU_SUP_CENTAUR_64
476 default y 474 default y
477 bool "Support Centaur processors" if PROCESSOR_SELECT 475 bool "Support Centaur processors" if PROCESSOR_SELECT
478 depends on 64BIT 476 depends on 64BIT
479 help 477 ---help---
480 This enables detection, tunings and quirks for Centaur processors 478 This enables detection, tunings and quirks for Centaur processors
481 479
482 You need this enabled if you want your kernel to run on a 480 You need this enabled if you want your kernel to run on a
@@ -490,7 +488,7 @@ config CPU_SUP_TRANSMETA_32
490 default y 488 default y
491 bool "Support Transmeta processors" if PROCESSOR_SELECT 489 bool "Support Transmeta processors" if PROCESSOR_SELECT
492 depends on !64BIT 490 depends on !64BIT
493 help 491 ---help---
494 This enables detection, tunings and quirks for Transmeta processors 492 This enables detection, tunings and quirks for Transmeta processors
495 493
496 You need this enabled if you want your kernel to run on a 494 You need this enabled if you want your kernel to run on a
@@ -504,7 +502,7 @@ config CPU_SUP_UMC_32
504 default y 502 default y
505 bool "Support UMC processors" if PROCESSOR_SELECT 503 bool "Support UMC processors" if PROCESSOR_SELECT
506 depends on !64BIT 504 depends on !64BIT
507 help 505 ---help---
508 This enables detection, tunings and quirks for UMC processors 506 This enables detection, tunings and quirks for UMC processors
509 507
510 You need this enabled if you want your kernel to run on a 508 You need this enabled if you want your kernel to run on a
@@ -523,7 +521,7 @@ config X86_PTRACE_BTS
523 bool "Branch Trace Store" 521 bool "Branch Trace Store"
524 default y 522 default y
525 depends on X86_DEBUGCTLMSR 523 depends on X86_DEBUGCTLMSR
526 help 524 ---help---
527 This adds a ptrace interface to the hardware's branch trace store. 525 This adds a ptrace interface to the hardware's branch trace store.
528 526
529 Debuggers may use it to collect an execution trace of the debugged 527 Debuggers may use it to collect an execution trace of the debugged
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 10d6cc3fd052..ba4781b93890 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -7,7 +7,7 @@ source "lib/Kconfig.debug"
7 7
8config STRICT_DEVMEM 8config STRICT_DEVMEM
9 bool "Filter access to /dev/mem" 9 bool "Filter access to /dev/mem"
10 help 10 ---help---
11 If this option is disabled, you allow userspace (root) access to all 11 If this option is disabled, you allow userspace (root) access to all
12 of memory, including kernel and userspace memory. Accidental 12 of memory, including kernel and userspace memory. Accidental
13 access to this is obviously disastrous, but specific access can 13 access to this is obviously disastrous, but specific access can
@@ -25,7 +25,7 @@ config STRICT_DEVMEM
25config X86_VERBOSE_BOOTUP 25config X86_VERBOSE_BOOTUP
26 bool "Enable verbose x86 bootup info messages" 26 bool "Enable verbose x86 bootup info messages"
27 default y 27 default y
28 help 28 ---help---
29 Enables the informational output from the decompression stage 29 Enables the informational output from the decompression stage
30 (e.g. bzImage) of the boot. If you disable this you will still 30 (e.g. bzImage) of the boot. If you disable this you will still
31 see errors. Disable this if you want silent bootup. 31 see errors. Disable this if you want silent bootup.
@@ -33,7 +33,7 @@ config X86_VERBOSE_BOOTUP
33config EARLY_PRINTK 33config EARLY_PRINTK
34 bool "Early printk" if EMBEDDED 34 bool "Early printk" if EMBEDDED
35 default y 35 default y
36 help 36 ---help---
37 Write kernel log output directly into the VGA buffer or to a serial 37 Write kernel log output directly into the VGA buffer or to a serial
38 port. 38 port.
39 39
@@ -47,7 +47,7 @@ config EARLY_PRINTK_DBGP
47 bool "Early printk via EHCI debug port" 47 bool "Early printk via EHCI debug port"
48 default n 48 default n
49 depends on EARLY_PRINTK && PCI 49 depends on EARLY_PRINTK && PCI
50 help 50 ---help---
51 Write kernel log output directly into the EHCI debug port. 51 Write kernel log output directly into the EHCI debug port.
52 52
53 This is useful for kernel debugging when your machine crashes very 53 This is useful for kernel debugging when your machine crashes very
@@ -59,14 +59,14 @@ config EARLY_PRINTK_DBGP
59config DEBUG_STACKOVERFLOW 59config DEBUG_STACKOVERFLOW
60 bool "Check for stack overflows" 60 bool "Check for stack overflows"
61 depends on DEBUG_KERNEL 61 depends on DEBUG_KERNEL
62 help 62 ---help---
63 This option will cause messages to be printed if free stack space 63 This option will cause messages to be printed if free stack space
64 drops below a certain limit. 64 drops below a certain limit.
65 65
66config DEBUG_STACK_USAGE 66config DEBUG_STACK_USAGE
67 bool "Stack utilization instrumentation" 67 bool "Stack utilization instrumentation"
68 depends on DEBUG_KERNEL 68 depends on DEBUG_KERNEL
69 help 69 ---help---
70 Enables the display of the minimum amount of free stack which each 70 Enables the display of the minimum amount of free stack which each
71 task has ever had available in the sysrq-T and sysrq-P debug output. 71 task has ever had available in the sysrq-T and sysrq-P debug output.
72 72
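   The sysrq-T dump mentioned above can be requested from a running system, assuming
   the magic sysrq facility is enabled:
       echo t > /proc/sysrq-trigger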
@@ -75,7 +75,7 @@ config DEBUG_STACK_USAGE
75config DEBUG_PAGEALLOC 75config DEBUG_PAGEALLOC
76 bool "Debug page memory allocations" 76 bool "Debug page memory allocations"
77 depends on DEBUG_KERNEL 77 depends on DEBUG_KERNEL
78 help 78 ---help---
79 Unmap pages from the kernel linear mapping after free_pages(). 79 Unmap pages from the kernel linear mapping after free_pages().
80 This results in a large slowdown, but helps to find certain types 80 This results in a large slowdown, but helps to find certain types
81 of memory corruptions. 81 of memory corruptions.
@@ -83,9 +83,9 @@ config DEBUG_PAGEALLOC
83config DEBUG_PER_CPU_MAPS 83config DEBUG_PER_CPU_MAPS
84 bool "Debug access to per_cpu maps" 84 bool "Debug access to per_cpu maps"
85 depends on DEBUG_KERNEL 85 depends on DEBUG_KERNEL
86 depends on X86_SMP 86 depends on SMP
87 default n 87 default n
88 help 88 ---help---
89 Say Y to verify that the per_cpu map being accessed has 89 Say Y to verify that the per_cpu map being accessed has
90 been setup. Adds a fair amount of code to kernel memory 90 been setup. Adds a fair amount of code to kernel memory
91 and decreases performance. 91 and decreases performance.
@@ -96,7 +96,7 @@ config X86_PTDUMP
96 bool "Export kernel pagetable layout to userspace via debugfs" 96 bool "Export kernel pagetable layout to userspace via debugfs"
97 depends on DEBUG_KERNEL 97 depends on DEBUG_KERNEL
98 select DEBUG_FS 98 select DEBUG_FS
99 help 99 ---help---
100 Say Y here if you want to show the kernel pagetable layout in a 100 Say Y here if you want to show the kernel pagetable layout in a
101 debugfs file. This information is only useful for kernel developers 101 debugfs file. This information is only useful for kernel developers
102 who are working in architecture specific areas of the kernel. 102 who are working in architecture specific areas of the kernel.
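   A minimal sketch of inspecting that layout, assuming debugfs is mounted in the usual
   place and the dump file keeps its conventional name (both are assumptions, not taken
   from this patch):
       mount -t debugfs none /sys/kernel/debug
       cat /sys/kernel/debug/kernel_page_tables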
@@ -108,7 +108,7 @@ config DEBUG_RODATA
108 bool "Write protect kernel read-only data structures" 108 bool "Write protect kernel read-only data structures"
109 default y 109 default y
110 depends on DEBUG_KERNEL 110 depends on DEBUG_KERNEL
111 help 111 ---help---
112 Mark the kernel read-only data as write-protected in the pagetables, 112 Mark the kernel read-only data as write-protected in the pagetables,
113 in order to catch accidental (and incorrect) writes to such const 113 in order to catch accidental (and incorrect) writes to such const
114 data. This is recommended so that we can catch kernel bugs sooner. 114 data. This is recommended so that we can catch kernel bugs sooner.
@@ -117,7 +117,8 @@ config DEBUG_RODATA
117config DEBUG_RODATA_TEST 117config DEBUG_RODATA_TEST
118 bool "Testcase for the DEBUG_RODATA feature" 118 bool "Testcase for the DEBUG_RODATA feature"
119 depends on DEBUG_RODATA 119 depends on DEBUG_RODATA
120 help 120 default y
121 ---help---
121 This option enables a testcase for the DEBUG_RODATA 122 This option enables a testcase for the DEBUG_RODATA
122 feature as well as for the change_page_attr() infrastructure. 123 feature as well as for the change_page_attr() infrastructure.
123 If in doubt, say "N" 124 If in doubt, say "N"
@@ -125,7 +126,7 @@ config DEBUG_RODATA_TEST
125config DEBUG_NX_TEST 126config DEBUG_NX_TEST
126 tristate "Testcase for the NX non-executable stack feature" 127 tristate "Testcase for the NX non-executable stack feature"
127 depends on DEBUG_KERNEL && m 128 depends on DEBUG_KERNEL && m
128 help 129 ---help---
129 This option enables a testcase for the CPU NX capability 130 This option enables a testcase for the CPU NX capability
130 and the software setup of this feature. 131 and the software setup of this feature.
131 If in doubt, say "N" 132 If in doubt, say "N"
@@ -133,7 +134,7 @@ config DEBUG_NX_TEST
133config 4KSTACKS 134config 4KSTACKS
134 bool "Use 4Kb for kernel stacks instead of 8Kb" 135 bool "Use 4Kb for kernel stacks instead of 8Kb"
135 depends on X86_32 136 depends on X86_32
136 help 137 ---help---
137 If you say Y here the kernel will use a 4Kb stacksize for the 138 If you say Y here the kernel will use a 4Kb stacksize for the
138 kernel stack attached to each process/thread. This facilitates 139 kernel stack attached to each process/thread. This facilitates
139 running more threads on a system and also reduces the pressure 140 running more threads on a system and also reduces the pressure
@@ -144,7 +145,7 @@ config DOUBLEFAULT
144 default y 145 default y
145 bool "Enable doublefault exception handler" if EMBEDDED 146 bool "Enable doublefault exception handler" if EMBEDDED
146 depends on X86_32 147 depends on X86_32
147 help 148 ---help---
148 This option allows trapping of rare doublefault exceptions that 149 This option allows trapping of rare doublefault exceptions that
149 would otherwise cause a system to silently reboot. Disabling this 150 would otherwise cause a system to silently reboot. Disabling this
150 option saves about 4k and might cause you much additional grey 151 option saves about 4k and might cause you much additional grey
@@ -154,7 +155,7 @@ config IOMMU_DEBUG
154 bool "Enable IOMMU debugging" 155 bool "Enable IOMMU debugging"
155 depends on GART_IOMMU && DEBUG_KERNEL 156 depends on GART_IOMMU && DEBUG_KERNEL
156 depends on X86_64 157 depends on X86_64
157 help 158 ---help---
158 Force the IOMMU to on even when you have less than 4GB of 159 Force the IOMMU to on even when you have less than 4GB of
159 memory and add debugging code. On overflow always panic. And 160 memory and add debugging code. On overflow always panic. And
160 allow to enable IOMMU leak tracing. Can be disabled at boot 161 allow to enable IOMMU leak tracing. Can be disabled at boot
@@ -170,7 +171,7 @@ config IOMMU_LEAK
170 bool "IOMMU leak tracing" 171 bool "IOMMU leak tracing"
171 depends on DEBUG_KERNEL 172 depends on DEBUG_KERNEL
172 depends on IOMMU_DEBUG 173 depends on IOMMU_DEBUG
173 help 174 ---help---
174 Add a simple leak tracer to the IOMMU code. This is useful when you 175 Add a simple leak tracer to the IOMMU code. This is useful when you
175 are debugging a buggy device driver that leaks IOMMU mappings. 176 are debugging a buggy device driver that leaks IOMMU mappings.
176 177
@@ -223,25 +224,25 @@ choice
223 224
224config IO_DELAY_0X80 225config IO_DELAY_0X80
225 bool "port 0x80 based port-IO delay [recommended]" 226 bool "port 0x80 based port-IO delay [recommended]"
226 help 227 ---help---
227 This is the traditional Linux IO delay used for in/out_p. 228 This is the traditional Linux IO delay used for in/out_p.
228 It is the most tested hence safest selection here. 229 It is the most tested hence safest selection here.
229 230
230config IO_DELAY_0XED 231config IO_DELAY_0XED
231 bool "port 0xed based port-IO delay" 232 bool "port 0xed based port-IO delay"
232 help 233 ---help---
233 Use port 0xed as the IO delay. This frees up port 0x80 which is 234 Use port 0xed as the IO delay. This frees up port 0x80 which is
234 often used as a hardware-debug port. 235 often used as a hardware-debug port.
235 236
236config IO_DELAY_UDELAY 237config IO_DELAY_UDELAY
237 bool "udelay based port-IO delay" 238 bool "udelay based port-IO delay"
238 help 239 ---help---
239 Use udelay(2) as the IO delay method. This provides the delay 240 Use udelay(2) as the IO delay method. This provides the delay
240 while not having any side-effect on the IO port space. 241 while not having any side-effect on the IO port space.
241 242
242config IO_DELAY_NONE 243config IO_DELAY_NONE
243 bool "no port-IO delay" 244 bool "no port-IO delay"
244 help 245 ---help---
245 No port-IO delay. Will break on old boxes that require port-IO 246 No port-IO delay. Will break on old boxes that require port-IO
246 delay for certain operations. Should work on most new machines. 247 delay for certain operations. Should work on most new machines.
247 248
@@ -275,18 +276,18 @@ config DEBUG_BOOT_PARAMS
275 bool "Debug boot parameters" 276 bool "Debug boot parameters"
276 depends on DEBUG_KERNEL 277 depends on DEBUG_KERNEL
277 depends on DEBUG_FS 278 depends on DEBUG_FS
278 help 279 ---help---
279 This option will cause struct boot_params to be exported via debugfs. 280 This option will cause struct boot_params to be exported via debugfs.
280 281
281config CPA_DEBUG 282config CPA_DEBUG
282 bool "CPA self-test code" 283 bool "CPA self-test code"
283 depends on DEBUG_KERNEL 284 depends on DEBUG_KERNEL
284 help 285 ---help---
285 Do change_page_attr() self-tests every 30 seconds. 286 Do change_page_attr() self-tests every 30 seconds.
286 287
287config OPTIMIZE_INLINING 288config OPTIMIZE_INLINING
288 bool "Allow gcc to uninline functions marked 'inline'" 289 bool "Allow gcc to uninline functions marked 'inline'"
289 help 290 ---help---
290 This option determines if the kernel forces gcc to inline the functions 291 This option determines if the kernel forces gcc to inline the functions
291 developers have marked 'inline'. Doing so takes away freedom from gcc to 292 developers have marked 'inline'. Doing so takes away freedom from gcc to
292 do what it thinks is best, which is desirable for the gcc 3.x series of 293 do what it thinks is best, which is desirable for the gcc 3.x series of
@@ -299,4 +300,3 @@ config OPTIMIZE_INLINING
299 If unsure, say N. 300 If unsure, say N.
300 301
301endmenu 302endmenu
302
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index d1a47adb5aec..99550c407990 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -73,7 +73,7 @@ else
73 73
74 stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh 74 stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh
75 stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \ 75 stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \
76 "$(CC)" -fstack-protector ) 76 "$(CC)" "-fstack-protector -DGCC_HAS_SP" )
77 stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \ 77 stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \
78 "$(CC)" -fstack-protector-all ) 78 "$(CC)" -fstack-protector-all )
79 79
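   The quoting change above means that, when the detection script reports success, the
   define travels together with the flag; an abbreviated, illustrative compiler
   invocation (file name is a placeholder) would then contain:
       gcc ... -fstack-protector -DGCC_HAS_SP ... -c foo.c
   presumably so that C code can #ifdef on GCC_HAS_SP where the raw compiler flag is
   not visible.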
@@ -102,29 +102,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
102# prevent gcc from generating any FP code by mistake 102# prevent gcc from generating any FP code by mistake
103KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) 103KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
104 104
105###
106# Sub architecture support
107# fcore-y is linked before mcore-y files.
108
109# Default subarch .c files
110mcore-y := arch/x86/mach-default/
111
112# Voyager subarch support
113mflags-$(CONFIG_X86_VOYAGER) := -Iarch/x86/include/asm/mach-voyager
114mcore-$(CONFIG_X86_VOYAGER) := arch/x86/mach-voyager/
115
116# generic subarchitecture
117mflags-$(CONFIG_X86_GENERICARCH):= -Iarch/x86/include/asm/mach-generic
118fcore-$(CONFIG_X86_GENERICARCH) += arch/x86/mach-generic/
119mcore-$(CONFIG_X86_GENERICARCH) := arch/x86/mach-default/
120
121# default subarch .h files
122mflags-y += -Iarch/x86/include/asm/mach-default
123
124# 64 bit does not support subarch support - clear sub arch variables
125fcore-$(CONFIG_X86_64) :=
126mcore-$(CONFIG_X86_64) :=
127
128KBUILD_CFLAGS += $(mflags-y) 105KBUILD_CFLAGS += $(mflags-y)
129KBUILD_AFLAGS += $(mflags-y) 106KBUILD_AFLAGS += $(mflags-y)
130 107
@@ -150,9 +127,6 @@ core-$(CONFIG_LGUEST_GUEST) += arch/x86/lguest/
150core-y += arch/x86/kernel/ 127core-y += arch/x86/kernel/
151core-y += arch/x86/mm/ 128core-y += arch/x86/mm/
152 129
153# Remaining sub architecture files
154core-y += $(mcore-y)
155
156core-y += arch/x86/crypto/ 130core-y += arch/x86/crypto/
157core-y += arch/x86/vdso/ 131core-y += arch/x86/vdso/
158core-$(CONFIG_IA32_EMULATION) += arch/x86/ia32/ 132core-$(CONFIG_IA32_EMULATION) += arch/x86/ia32/
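The Makefile hunk above passes -DGCC_HAS_SP together with -fstack-protector to the compiler-capability check, so the define lands in KBUILD_CFLAGS exactly when the compiler accepted the stack-protector flag. A minimal sketch of how C code can key off such a build-time probe; only the GCC_HAS_SP name comes from the hunk, the consumer function is hypothetical.

/*
 * Compile with:  cc -fstack-protector -DGCC_HAS_SP demo.c   (flag accepted)
 * or plain:      cc demo.c                                  (flag rejected/omitted)
 * and the fallback path is chosen automatically.
 */
#include <stdio.h>

static void setup_stack_canary(void)
{
#ifdef GCC_HAS_SP
	puts("compiler supports -fstack-protector: initializing canary");
#else
	puts("no stack-protector support: skipping canary setup");
#endif
}

int main(void)
{
	setup_stack_canary();
	return 0;
}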
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index edba00d98ac3..739bce993b56 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -188,7 +188,6 @@ CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
188CONFIG_SMP=y 188CONFIG_SMP=y
189CONFIG_X86_FIND_SMP_CONFIG=y 189CONFIG_X86_FIND_SMP_CONFIG=y
190CONFIG_X86_MPPARSE=y 190CONFIG_X86_MPPARSE=y
191CONFIG_X86_PC=y
192# CONFIG_X86_ELAN is not set 191# CONFIG_X86_ELAN is not set
193# CONFIG_X86_VOYAGER is not set 192# CONFIG_X86_VOYAGER is not set
194# CONFIG_X86_GENERICARCH is not set 193# CONFIG_X86_GENERICARCH is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 322dd2748fc9..02b514e8f4c4 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -187,7 +187,6 @@ CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
187CONFIG_SMP=y 187CONFIG_SMP=y
188CONFIG_X86_FIND_SMP_CONFIG=y 188CONFIG_X86_FIND_SMP_CONFIG=y
189CONFIG_X86_MPPARSE=y 189CONFIG_X86_MPPARSE=y
190CONFIG_X86_PC=y
191# CONFIG_X86_ELAN is not set 190# CONFIG_X86_ELAN is not set
192# CONFIG_X86_VOYAGER is not set 191# CONFIG_X86_VOYAGER is not set
193# CONFIG_X86_GENERICARCH is not set 192# CONFIG_X86_GENERICARCH is not set
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 9dabd00e9805..dd77ac0cac46 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -46,78 +46,83 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
46 46
47int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) 47int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
48{ 48{
49 int err; 49 int err = 0;
50 50
51 if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) 51 if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
52 return -EFAULT; 52 return -EFAULT;
53 53
54 /* If you change siginfo_t structure, please make sure that 54 put_user_try {
55 this code is fixed accordingly. 55 /* If you change siginfo_t structure, please make sure that
56 It should never copy any pad contained in the structure 56 this code is fixed accordingly.
57 to avoid security leaks, but must copy the generic 57 It should never copy any pad contained in the structure
58 3 ints plus the relevant union member. */ 58 to avoid security leaks, but must copy the generic
59 err = __put_user(from->si_signo, &to->si_signo); 59 3 ints plus the relevant union member. */
60 err |= __put_user(from->si_errno, &to->si_errno); 60 put_user_ex(from->si_signo, &to->si_signo);
61 err |= __put_user((short)from->si_code, &to->si_code); 61 put_user_ex(from->si_errno, &to->si_errno);
62 62 put_user_ex((short)from->si_code, &to->si_code);
63 if (from->si_code < 0) { 63
64 err |= __put_user(from->si_pid, &to->si_pid); 64 if (from->si_code < 0) {
65 err |= __put_user(from->si_uid, &to->si_uid); 65 put_user_ex(from->si_pid, &to->si_pid);
66 err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr); 66 put_user_ex(from->si_uid, &to->si_uid);
67 } else { 67 put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
68 /* 68 } else {
69 * First 32bits of unions are always present: 69 /*
70 * si_pid === si_band === si_tid === si_addr(LS half) 70 * First 32bits of unions are always present:
71 */ 71 * si_pid === si_band === si_tid === si_addr(LS half)
72 err |= __put_user(from->_sifields._pad[0], 72 */
73 &to->_sifields._pad[0]); 73 put_user_ex(from->_sifields._pad[0],
74 switch (from->si_code >> 16) { 74 &to->_sifields._pad[0]);
75 case __SI_FAULT >> 16: 75 switch (from->si_code >> 16) {
76 break; 76 case __SI_FAULT >> 16:
77 case __SI_CHLD >> 16: 77 break;
78 err |= __put_user(from->si_utime, &to->si_utime); 78 case __SI_CHLD >> 16:
79 err |= __put_user(from->si_stime, &to->si_stime); 79 put_user_ex(from->si_utime, &to->si_utime);
80 err |= __put_user(from->si_status, &to->si_status); 80 put_user_ex(from->si_stime, &to->si_stime);
81 /* FALL THROUGH */ 81 put_user_ex(from->si_status, &to->si_status);
82 default: 82 /* FALL THROUGH */
83 case __SI_KILL >> 16: 83 default:
84 err |= __put_user(from->si_uid, &to->si_uid); 84 case __SI_KILL >> 16:
85 break; 85 put_user_ex(from->si_uid, &to->si_uid);
86 case __SI_POLL >> 16: 86 break;
87 err |= __put_user(from->si_fd, &to->si_fd); 87 case __SI_POLL >> 16:
88 break; 88 put_user_ex(from->si_fd, &to->si_fd);
89 case __SI_TIMER >> 16: 89 break;
90 err |= __put_user(from->si_overrun, &to->si_overrun); 90 case __SI_TIMER >> 16:
91 err |= __put_user(ptr_to_compat(from->si_ptr), 91 put_user_ex(from->si_overrun, &to->si_overrun);
92 &to->si_ptr); 92 put_user_ex(ptr_to_compat(from->si_ptr),
93 break; 93 &to->si_ptr);
94 /* This is not generated by the kernel as of now. */ 94 break;
95 case __SI_RT >> 16: 95 /* This is not generated by the kernel as of now. */
96 case __SI_MESGQ >> 16: 96 case __SI_RT >> 16:
97 err |= __put_user(from->si_uid, &to->si_uid); 97 case __SI_MESGQ >> 16:
98 err |= __put_user(from->si_int, &to->si_int); 98 put_user_ex(from->si_uid, &to->si_uid);
99 break; 99 put_user_ex(from->si_int, &to->si_int);
100 break;
101 }
100 } 102 }
101 } 103 } put_user_catch(err);
104
102 return err; 105 return err;
103} 106}
104 107
105int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) 108int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
106{ 109{
107 int err; 110 int err = 0;
108 u32 ptr32; 111 u32 ptr32;
109 112
110 if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t))) 113 if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
111 return -EFAULT; 114 return -EFAULT;
112 115
113 err = __get_user(to->si_signo, &from->si_signo); 116 get_user_try {
114 err |= __get_user(to->si_errno, &from->si_errno); 117 get_user_ex(to->si_signo, &from->si_signo);
115 err |= __get_user(to->si_code, &from->si_code); 118 get_user_ex(to->si_errno, &from->si_errno);
119 get_user_ex(to->si_code, &from->si_code);
116 120
117 err |= __get_user(to->si_pid, &from->si_pid); 121 get_user_ex(to->si_pid, &from->si_pid);
118 err |= __get_user(to->si_uid, &from->si_uid); 122 get_user_ex(to->si_uid, &from->si_uid);
119 err |= __get_user(ptr32, &from->si_ptr); 123 get_user_ex(ptr32, &from->si_ptr);
120 to->si_ptr = compat_ptr(ptr32); 124 to->si_ptr = compat_ptr(ptr32);
125 } get_user_catch(err);
121 126
122 return err; 127 return err;
123} 128}
@@ -142,17 +147,23 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
142 struct pt_regs *regs) 147 struct pt_regs *regs)
143{ 148{
144 stack_t uss, uoss; 149 stack_t uss, uoss;
145 int ret; 150 int ret, err = 0;
146 mm_segment_t seg; 151 mm_segment_t seg;
147 152
148 if (uss_ptr) { 153 if (uss_ptr) {
149 u32 ptr; 154 u32 ptr;
150 155
151 memset(&uss, 0, sizeof(stack_t)); 156 memset(&uss, 0, sizeof(stack_t));
152 if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)) || 157 if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)))
153 __get_user(ptr, &uss_ptr->ss_sp) || 158 return -EFAULT;
154 __get_user(uss.ss_flags, &uss_ptr->ss_flags) || 159
155 __get_user(uss.ss_size, &uss_ptr->ss_size)) 160 get_user_try {
161 get_user_ex(ptr, &uss_ptr->ss_sp);
162 get_user_ex(uss.ss_flags, &uss_ptr->ss_flags);
163 get_user_ex(uss.ss_size, &uss_ptr->ss_size);
164 } get_user_catch(err);
165
166 if (err)
156 return -EFAULT; 167 return -EFAULT;
157 uss.ss_sp = compat_ptr(ptr); 168 uss.ss_sp = compat_ptr(ptr);
158 } 169 }
@@ -161,10 +172,16 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
161 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp); 172 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
162 set_fs(seg); 173 set_fs(seg);
163 if (ret >= 0 && uoss_ptr) { 174 if (ret >= 0 && uoss_ptr) {
164 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)) || 175 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
165 __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) || 176 return -EFAULT;
166 __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) || 177
167 __put_user(uoss.ss_size, &uoss_ptr->ss_size)) 178 put_user_try {
179 put_user_ex(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp);
180 put_user_ex(uoss.ss_flags, &uoss_ptr->ss_flags);
181 put_user_ex(uoss.ss_size, &uoss_ptr->ss_size);
182 } put_user_catch(err);
183
184 if (err)
168 ret = -EFAULT; 185 ret = -EFAULT;
169 } 186 }
170 return ret; 187 return ret;
@@ -174,18 +191,18 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
174 * Do a signal return; undo the signal stack. 191 * Do a signal return; undo the signal stack.
175 */ 192 */
176#define COPY(x) { \ 193#define COPY(x) { \
177 err |= __get_user(regs->x, &sc->x); \ 194 get_user_ex(regs->x, &sc->x); \
178} 195}
179 196
180#define COPY_SEG_CPL3(seg) { \ 197#define COPY_SEG_CPL3(seg) { \
181 unsigned short tmp; \ 198 unsigned short tmp; \
182 err |= __get_user(tmp, &sc->seg); \ 199 get_user_ex(tmp, &sc->seg); \
183 regs->seg = tmp | 3; \ 200 regs->seg = tmp | 3; \
184} 201}
185 202
186#define RELOAD_SEG(seg) { \ 203#define RELOAD_SEG(seg) { \
187 unsigned int cur, pre; \ 204 unsigned int cur, pre; \
188 err |= __get_user(pre, &sc->seg); \ 205 get_user_ex(pre, &sc->seg); \
189 savesegment(seg, cur); \ 206 savesegment(seg, cur); \
190 pre |= 3; \ 207 pre |= 3; \
191 if (pre != cur) \ 208 if (pre != cur) \
@@ -209,39 +226,42 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
209 sc, sc->err, sc->ip, sc->cs, sc->flags); 226 sc, sc->err, sc->ip, sc->cs, sc->flags);
210#endif 227#endif
211 228
212 /* 229 get_user_try {
213 * Reload fs and gs if they have changed in the signal 230 /*
214 * handler. This does not handle long fs/gs base changes in 231 * Reload fs and gs if they have changed in the signal
215 * the handler, but does not clobber them at least in the 232 * handler. This does not handle long fs/gs base changes in
216 * normal case. 233 * the handler, but does not clobber them at least in the
217 */ 234 * normal case.
218 err |= __get_user(gs, &sc->gs); 235 */
219 gs |= 3; 236 get_user_ex(gs, &sc->gs);
220 savesegment(gs, oldgs); 237 gs |= 3;
221 if (gs != oldgs) 238 savesegment(gs, oldgs);
222 load_gs_index(gs); 239 if (gs != oldgs)
223 240 load_gs_index(gs);
224 RELOAD_SEG(fs); 241
225 RELOAD_SEG(ds); 242 RELOAD_SEG(fs);
226 RELOAD_SEG(es); 243 RELOAD_SEG(ds);
227 244 RELOAD_SEG(es);
228 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); 245
229 COPY(dx); COPY(cx); COPY(ip); 246 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
230 /* Don't touch extended registers */ 247 COPY(dx); COPY(cx); COPY(ip);
231 248 /* Don't touch extended registers */
232 COPY_SEG_CPL3(cs); 249
233 COPY_SEG_CPL3(ss); 250 COPY_SEG_CPL3(cs);
234 251 COPY_SEG_CPL3(ss);
235 err |= __get_user(tmpflags, &sc->flags); 252
236 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); 253 get_user_ex(tmpflags, &sc->flags);
237 /* disable syscall checks */ 254 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
238 regs->orig_ax = -1; 255 /* disable syscall checks */
239 256 regs->orig_ax = -1;
240 err |= __get_user(tmp, &sc->fpstate); 257
241 buf = compat_ptr(tmp); 258 get_user_ex(tmp, &sc->fpstate);
242 err |= restore_i387_xstate_ia32(buf); 259 buf = compat_ptr(tmp);
243 260 err |= restore_i387_xstate_ia32(buf);
244 err |= __get_user(*pax, &sc->ax); 261
262 get_user_ex(*pax, &sc->ax);
263 } get_user_catch(err);
264
245 return err; 265 return err;
246} 266}
247 267
@@ -319,36 +339,38 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
319{ 339{
320 int tmp, err = 0; 340 int tmp, err = 0;
321 341
322 savesegment(gs, tmp); 342 put_user_try {
323 err |= __put_user(tmp, (unsigned int __user *)&sc->gs); 343 savesegment(gs, tmp);
324 savesegment(fs, tmp); 344 put_user_ex(tmp, (unsigned int __user *)&sc->gs);
325 err |= __put_user(tmp, (unsigned int __user *)&sc->fs); 345 savesegment(fs, tmp);
326 savesegment(ds, tmp); 346 put_user_ex(tmp, (unsigned int __user *)&sc->fs);
327 err |= __put_user(tmp, (unsigned int __user *)&sc->ds); 347 savesegment(ds, tmp);
328 savesegment(es, tmp); 348 put_user_ex(tmp, (unsigned int __user *)&sc->ds);
329 err |= __put_user(tmp, (unsigned int __user *)&sc->es); 349 savesegment(es, tmp);
330 350 put_user_ex(tmp, (unsigned int __user *)&sc->es);
331 err |= __put_user(regs->di, &sc->di); 351
332 err |= __put_user(regs->si, &sc->si); 352 put_user_ex(regs->di, &sc->di);
333 err |= __put_user(regs->bp, &sc->bp); 353 put_user_ex(regs->si, &sc->si);
334 err |= __put_user(regs->sp, &sc->sp); 354 put_user_ex(regs->bp, &sc->bp);
335 err |= __put_user(regs->bx, &sc->bx); 355 put_user_ex(regs->sp, &sc->sp);
336 err |= __put_user(regs->dx, &sc->dx); 356 put_user_ex(regs->bx, &sc->bx);
337 err |= __put_user(regs->cx, &sc->cx); 357 put_user_ex(regs->dx, &sc->dx);
338 err |= __put_user(regs->ax, &sc->ax); 358 put_user_ex(regs->cx, &sc->cx);
339 err |= __put_user(current->thread.trap_no, &sc->trapno); 359 put_user_ex(regs->ax, &sc->ax);
340 err |= __put_user(current->thread.error_code, &sc->err); 360 put_user_ex(current->thread.trap_no, &sc->trapno);
341 err |= __put_user(regs->ip, &sc->ip); 361 put_user_ex(current->thread.error_code, &sc->err);
342 err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs); 362 put_user_ex(regs->ip, &sc->ip);
343 err |= __put_user(regs->flags, &sc->flags); 363 put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
344 err |= __put_user(regs->sp, &sc->sp_at_signal); 364 put_user_ex(regs->flags, &sc->flags);
345 err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); 365 put_user_ex(regs->sp, &sc->sp_at_signal);
346 366 put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
347 err |= __put_user(ptr_to_compat(fpstate), &sc->fpstate); 367
348 368 put_user_ex(ptr_to_compat(fpstate), &sc->fpstate);
349 /* non-iBCS2 extensions.. */ 369
350 err |= __put_user(mask, &sc->oldmask); 370 /* non-iBCS2 extensions.. */
351 err |= __put_user(current->thread.cr2, &sc->cr2); 371 put_user_ex(mask, &sc->oldmask);
372 put_user_ex(current->thread.cr2, &sc->cr2);
373 } put_user_catch(err);
352 374
353 return err; 375 return err;
354} 376}
@@ -437,13 +459,17 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
437 else 459 else
438 restorer = &frame->retcode; 460 restorer = &frame->retcode;
439 } 461 }
440 err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
441 462
442 /* 463 put_user_try {
443 * These are actually not used anymore, but left because some 464 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
444 * gdb versions depend on them as a marker. 465
445 */ 466 /*
446 err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode); 467 * These are actually not used anymore, but left because some
468 * gdb versions depend on them as a marker.
469 */
470 put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
471 } put_user_catch(err);
472
447 if (err) 473 if (err)
448 return -EFAULT; 474 return -EFAULT;
449 475
@@ -496,41 +522,40 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
496 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 522 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
497 return -EFAULT; 523 return -EFAULT;
498 524
499 err |= __put_user(sig, &frame->sig); 525 put_user_try {
500 err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo); 526 put_user_ex(sig, &frame->sig);
501 err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc); 527 put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
502 err |= copy_siginfo_to_user32(&frame->info, info); 528 put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
503 if (err) 529 err |= copy_siginfo_to_user32(&frame->info, info);
504 return -EFAULT;
505 530
506 /* Create the ucontext. */ 531 /* Create the ucontext. */
507 if (cpu_has_xsave) 532 if (cpu_has_xsave)
508 err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); 533 put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
509 else 534 else
510 err |= __put_user(0, &frame->uc.uc_flags); 535 put_user_ex(0, &frame->uc.uc_flags);
511 err |= __put_user(0, &frame->uc.uc_link); 536 put_user_ex(0, &frame->uc.uc_link);
512 err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); 537 put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
513 err |= __put_user(sas_ss_flags(regs->sp), 538 put_user_ex(sas_ss_flags(regs->sp),
514 &frame->uc.uc_stack.ss_flags); 539 &frame->uc.uc_stack.ss_flags);
515 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); 540 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
516 err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate, 541 err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
517 regs, set->sig[0]); 542 regs, set->sig[0]);
518 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 543 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
519 if (err) 544
520 return -EFAULT; 545 if (ka->sa.sa_flags & SA_RESTORER)
546 restorer = ka->sa.sa_restorer;
547 else
548 restorer = VDSO32_SYMBOL(current->mm->context.vdso,
549 rt_sigreturn);
550 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
551
552 /*
553 * Not actually used anymore, but left because some gdb
554 * versions need it.
555 */
556 put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
557 } put_user_catch(err);
521 558
522 if (ka->sa.sa_flags & SA_RESTORER)
523 restorer = ka->sa.sa_restorer;
524 else
525 restorer = VDSO32_SYMBOL(current->mm->context.vdso,
526 rt_sigreturn);
527 err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
528
529 /*
530 * Not actually used anymore, but left because some gdb
531 * versions need it.
532 */
533 err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
534 if (err) 559 if (err)
535 return -EFAULT; 560 return -EFAULT;
536 561
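The ia32_signal.c conversion above replaces chains of err |= __put_user(...) with put_user_try { ... put_user_ex(...); ... } put_user_catch(err), so the error status is collected once when the block ends instead of after every access (the real macros lean on the exception table for the fast path). Below is a standalone userspace model of that control flow, assuming a plain error flag in place of the fault-handling machinery; apart from the three macro names, everything here is invented for the demo.

#include <stdio.h>

static int uaccess_err;	/* models the per-thread fault flag */

/* Minimal stand-ins for the batching macros used in the hunk. */
#define put_user_try		do { uaccess_err = 0;
#define put_user_ex(x, ptr)	do { if (write_word((ptr), (x)))	\
					uaccess_err = 1; } while (0)
#define put_user_catch(e)	(e) |= uaccess_err; } while (0)

/* Pretend NULL destinations fault, like an unmapped user pointer. */
static int write_word(unsigned int *dst, unsigned int val)
{
	if (!dst)
		return -1;
	*dst = val;
	return 0;
}

int main(void)
{
	unsigned int slot = 0;
	int err = 0;

	put_user_try {
		put_user_ex(1, &slot);
		put_user_ex(2, NULL);		/* this one "faults" */
	} put_user_catch(err);

	printf("slot=%u err=%d\n", slot, err);
	return 0;
}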
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 5a0d76dc56a4..097a6b64c24d 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -112,8 +112,8 @@ ENTRY(ia32_sysenter_target)
112 CFI_DEF_CFA rsp,0 112 CFI_DEF_CFA rsp,0
113 CFI_REGISTER rsp,rbp 113 CFI_REGISTER rsp,rbp
114 SWAPGS_UNSAFE_STACK 114 SWAPGS_UNSAFE_STACK
115 movq %gs:pda_kernelstack, %rsp 115 movq PER_CPU_VAR(kernel_stack), %rsp
116 addq $(PDA_STACKOFFSET),%rsp 116 addq $(KERNEL_STACK_OFFSET),%rsp
117 /* 117 /*
118 * No need to follow this irqs on/off section: the syscall 118 * No need to follow this irqs on/off section: the syscall
119 * disabled irqs, here we enable it straight after entry: 119 * disabled irqs, here we enable it straight after entry:
@@ -273,13 +273,13 @@ ENDPROC(ia32_sysenter_target)
273ENTRY(ia32_cstar_target) 273ENTRY(ia32_cstar_target)
274 CFI_STARTPROC32 simple 274 CFI_STARTPROC32 simple
275 CFI_SIGNAL_FRAME 275 CFI_SIGNAL_FRAME
276 CFI_DEF_CFA rsp,PDA_STACKOFFSET 276 CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
277 CFI_REGISTER rip,rcx 277 CFI_REGISTER rip,rcx
278 /*CFI_REGISTER rflags,r11*/ 278 /*CFI_REGISTER rflags,r11*/
279 SWAPGS_UNSAFE_STACK 279 SWAPGS_UNSAFE_STACK
280 movl %esp,%r8d 280 movl %esp,%r8d
281 CFI_REGISTER rsp,r8 281 CFI_REGISTER rsp,r8
282 movq %gs:pda_kernelstack,%rsp 282 movq PER_CPU_VAR(kernel_stack),%rsp
283 /* 283 /*
284 * No need to follow this irqs on/off section: the syscall 284 * No need to follow this irqs on/off section: the syscall
285 * disabled irqs and here we enable it straight after entry: 285 * disabled irqs and here we enable it straight after entry:
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index ab1d51a8855e..fba49f66228f 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -33,7 +33,13 @@
33 } while (0) 33 } while (0)
34 34
35 35
36#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
36extern void generic_apic_probe(void); 37extern void generic_apic_probe(void);
38#else
39static inline void generic_apic_probe(void)
40{
41}
42#endif
37 43
38#ifdef CONFIG_X86_LOCAL_APIC 44#ifdef CONFIG_X86_LOCAL_APIC
39 45
@@ -41,6 +47,21 @@ extern unsigned int apic_verbosity;
41extern int local_apic_timer_c2_ok; 47extern int local_apic_timer_c2_ok;
42 48
43extern int disable_apic; 49extern int disable_apic;
50
51#ifdef CONFIG_SMP
52extern void __inquire_remote_apic(int apicid);
53#else /* CONFIG_SMP */
54static inline void __inquire_remote_apic(int apicid)
55{
56}
57#endif /* CONFIG_SMP */
58
59static inline void default_inquire_remote_apic(int apicid)
60{
61 if (apic_verbosity >= APIC_DEBUG)
62 __inquire_remote_apic(apicid);
63}
64
44/* 65/*
45 * Basic functions accessing APICs. 66 * Basic functions accessing APICs.
46 */ 67 */
@@ -124,12 +145,35 @@ struct apic_ops {
124 145
125extern struct apic_ops *apic_ops; 146extern struct apic_ops *apic_ops;
126 147
127#define apic_read (apic_ops->read) 148static inline u32 apic_read(u32 reg)
128#define apic_write (apic_ops->write) 149{
129#define apic_icr_read (apic_ops->icr_read) 150 return apic_ops->read(reg);
130#define apic_icr_write (apic_ops->icr_write) 151}
131#define apic_wait_icr_idle (apic_ops->wait_icr_idle) 152
132#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle) 153static inline void apic_write(u32 reg, u32 val)
154{
155 apic_ops->write(reg, val);
156}
157
158static inline u64 apic_icr_read(void)
159{
160 return apic_ops->icr_read();
161}
162
163static inline void apic_icr_write(u32 low, u32 high)
164{
165 apic_ops->icr_write(low, high);
166}
167
168static inline void apic_wait_icr_idle(void)
169{
170 apic_ops->wait_icr_idle();
171}
172
173static inline u32 safe_apic_wait_icr_idle(void)
174{
175 return apic_ops->safe_wait_icr_idle();
176}
133 177
134extern int get_physical_broadcast(void); 178extern int get_physical_broadcast(void);
135 179
@@ -196,4 +240,22 @@ static inline void disable_local_APIC(void) { }
196 240
197#endif /* !CONFIG_X86_LOCAL_APIC */ 241#endif /* !CONFIG_X86_LOCAL_APIC */
198 242
243#ifdef CONFIG_X86_64
244#define SET_APIC_ID(x) (apic->set_apic_id(x))
245#else
246
247#ifdef CONFIG_X86_LOCAL_APIC
248static inline unsigned default_get_apic_id(unsigned long x)
249{
250 unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
251
252 if (APIC_XAPIC(ver))
253 return (x >> 24) & 0xFF;
254 else
255 return (x >> 24) & 0x0F;
256}
257#endif
258
259#endif
260
199#endif /* _ASM_X86_APIC_H */ 261#endif /* _ASM_X86_APIC_H */
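The apic.h hunk turns the #define apic_read (apic_ops->read) style accessors into static inline wrappers, which gives real prototypes (argument and return type checking, addressable symbols) while still dispatching through the apic_ops pointer. A self-contained sketch of the same shape; the toy backend is made up, only the struct and wrapper names follow the hunk.

#include <stdio.h>
#include <stdint.h>

struct apic_ops {
	uint32_t (*read)(uint32_t reg);
	void (*write)(uint32_t reg, uint32_t val);
};

/* Toy backend standing in for the real register accessors. */
static uint32_t regs[256];
static uint32_t demo_read(uint32_t reg) { return regs[reg & 0xff]; }
static void demo_write(uint32_t reg, uint32_t val) { regs[reg & 0xff] = val; }

static struct apic_ops demo_ops = { .read = demo_read, .write = demo_write };
static struct apic_ops *apic_ops = &demo_ops;

/*
 * Inline wrappers instead of #defines: callers get prototype checking
 * and the indirection through apic_ops stays in one place.
 */
static inline uint32_t apic_read(uint32_t reg)
{
	return apic_ops->read(reg);
}

static inline void apic_write(uint32_t reg, uint32_t val)
{
	apic_ops->write(reg, val);
}

int main(void)
{
	apic_write(0x20, 7);
	printf("reg 0x20 = %u\n", apic_read(0x20));
	return 0;
}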
diff --git a/arch/x86/include/asm/apicnum.h b/arch/x86/include/asm/apicnum.h
new file mode 100644
index 000000000000..82f613c607ce
--- /dev/null
+++ b/arch/x86/include/asm/apicnum.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_X86_APICNUM_H
2#define _ASM_X86_APICNUM_H
3
4/* define MAX_IO_APICS */
5#ifdef CONFIG_X86_32
6# define MAX_IO_APICS 64
7#else
8# define MAX_IO_APICS 128
9# define MAX_LOCAL_APIC 32768
10#endif
11
12#endif /* _ASM_X86_APICNUM_H */
diff --git a/arch/x86/include/asm/mach-default/apm.h b/arch/x86/include/asm/apm.h
index 20370c6db74b..20370c6db74b 100644
--- a/arch/x86/include/asm/mach-default/apm.h
+++ b/arch/x86/include/asm/apm.h
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
deleted file mode 100644
index d8dd9f537911..000000000000
--- a/arch/x86/include/asm/bigsmp/apic.h
+++ /dev/null
@@ -1,155 +0,0 @@
1#ifndef __ASM_MACH_APIC_H
2#define __ASM_MACH_APIC_H
3
4#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
5#define esr_disable (1)
6
7static inline int apic_id_registered(void)
8{
9 return (1);
10}
11
12static inline const cpumask_t *target_cpus(void)
13{
14#ifdef CONFIG_SMP
15 return &cpu_online_map;
16#else
17 return &cpumask_of_cpu(0);
18#endif
19}
20
21#undef APIC_DEST_LOGICAL
22#define APIC_DEST_LOGICAL 0
23#define APIC_DFR_VALUE (APIC_DFR_FLAT)
24#define INT_DELIVERY_MODE (dest_Fixed)
25#define INT_DEST_MODE (0) /* phys delivery to target proc */
26#define NO_BALANCE_IRQ (0)
27
28static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
29{
30 return (0);
31}
32
33static inline unsigned long check_apicid_present(int bit)
34{
35 return (1);
36}
37
38static inline unsigned long calculate_ldr(int cpu)
39{
40 unsigned long val, id;
41 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
42 id = xapic_phys_to_log_apicid(cpu);
43 val |= SET_APIC_LOGICAL_ID(id);
44 return val;
45}
46
47/*
48 * Set up the logical destination ID.
49 *
50 * Intel recommends to set DFR, LDR and TPR before enabling
51 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
52 * document number 292116). So here it goes...
53 */
54static inline void init_apic_ldr(void)
55{
56 unsigned long val;
57 int cpu = smp_processor_id();
58
59 apic_write(APIC_DFR, APIC_DFR_VALUE);
60 val = calculate_ldr(cpu);
61 apic_write(APIC_LDR, val);
62}
63
64static inline void setup_apic_routing(void)
65{
66 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
67 "Physflat", nr_ioapics);
68}
69
70static inline int multi_timer_check(int apic, int irq)
71{
72 return (0);
73}
74
75static inline int apicid_to_node(int logical_apicid)
76{
77 return apicid_2_node[hard_smp_processor_id()];
78}
79
80static inline int cpu_present_to_apicid(int mps_cpu)
81{
82 if (mps_cpu < nr_cpu_ids)
83 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
84
85 return BAD_APICID;
86}
87
88static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
89{
90 return physid_mask_of_physid(phys_apicid);
91}
92
93extern u8 cpu_2_logical_apicid[];
94/* Mapping from cpu number to logical apicid */
95static inline int cpu_to_logical_apicid(int cpu)
96{
97 if (cpu >= nr_cpu_ids)
98 return BAD_APICID;
99 return cpu_physical_id(cpu);
100}
101
102static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
103{
104 /* For clustered we don't have a good way to do this yet - hack */
105 return physids_promote(0xFFL);
106}
107
108static inline void setup_portio_remap(void)
109{
110}
111
112static inline void enable_apic_mode(void)
113{
114}
115
116static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
117{
118 return (1);
119}
120
121/* As we are using single CPU as destination, pick only one CPU here */
122static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
123{
124 int cpu;
125 int apicid;
126
127 cpu = first_cpu(*cpumask);
128 apicid = cpu_to_logical_apicid(cpu);
129 return apicid;
130}
131
132static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
133 const struct cpumask *andmask)
134{
135 int cpu;
136
137 /*
138 * We're using fixed IRQ delivery, can only return one phys APIC ID.
139 * May as well be the first.
140 */
141 for_each_cpu_and(cpu, cpumask, andmask)
142 if (cpumask_test_cpu(cpu, cpu_online_mask))
143 break;
144 if (cpu < nr_cpu_ids)
145 return cpu_to_logical_apicid(cpu);
146
147 return BAD_APICID;
148}
149
150static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
151{
152 return cpuid_apic >> index_msb;
153}
154
155#endif /* __ASM_MACH_APIC_H */
diff --git a/arch/x86/include/asm/bigsmp/apicdef.h b/arch/x86/include/asm/bigsmp/apicdef.h
deleted file mode 100644
index 392c3f5ef2fe..000000000000
--- a/arch/x86/include/asm/bigsmp/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xFF);
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h
deleted file mode 100644
index 27fcd01b3ae6..000000000000
--- a/arch/x86/include/asm/bigsmp/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef __ASM_MACH_IPI_H
2#define __ASM_MACH_IPI_H
3
4void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
5void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
6
7static inline void send_IPI_mask(const struct cpumask *mask, int vector)
8{
9 send_IPI_mask_sequence(mask, vector);
10}
11
12static inline void send_IPI_allbutself(int vector)
13{
14 send_IPI_mask_allbutself(cpu_online_mask, vector);
15}
16
17static inline void send_IPI_all(int vector)
18{
19 send_IPI_mask(cpu_online_mask, vector);
20}
21
22#endif /* __ASM_MACH_IPI_H */
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index bae482df6039..b185091bf19c 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -7,6 +7,20 @@
7#include <linux/nodemask.h> 7#include <linux/nodemask.h>
8#include <linux/percpu.h> 8#include <linux/percpu.h>
9 9
10#ifdef CONFIG_SMP
11
12extern void prefill_possible_map(void);
13
14#else /* CONFIG_SMP */
15
16static inline void prefill_possible_map(void) {}
17
18#define cpu_physical_id(cpu) boot_cpu_physical_apicid
19#define safe_smp_processor_id() 0
20#define stack_smp_processor_id() 0
21
22#endif /* CONFIG_SMP */
23
10struct x86_cpu { 24struct x86_cpu {
11 struct cpu cpu; 25 struct cpu cpu;
12}; 26};
@@ -17,4 +31,7 @@ extern void arch_unregister_cpu(int);
17#endif 31#endif
18 32
19DECLARE_PER_CPU(int, cpu_state); 33DECLARE_PER_CPU(int, cpu_state);
34
35extern unsigned int boot_cpu_id;
36
20#endif /* _ASM_X86_CPU_H */ 37#endif /* _ASM_X86_CPU_H */
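The cpu.h hunk adds a !CONFIG_SMP branch that turns prefill_possible_map() into an empty inline and pins the processor-id helpers to CPU 0, so callers need no #ifdefs of their own. A compact sketch of that stub pattern, assuming a hypothetical DEMO_SMP switch in place of CONFIG_SMP.

#include <stdio.h>

/* Build with -DDEMO_SMP to take the "real" branch. */
#ifdef DEMO_SMP
static void prefill_possible_map(void)
{
	puts("SMP build: sizing the possible-CPU map");
}
static int safe_smp_processor_id(void)
{
	return 3;	/* stands in for the real per-CPU query */
}
#else
/* UP build: empty stub and a constant CPU number, no #ifdef at call sites. */
static inline void prefill_possible_map(void) { }
#define safe_smp_processor_id()	0
#endif

int main(void)
{
	prefill_possible_map();
	printf("running on cpu %d\n", safe_smp_processor_id());
	return 0;
}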
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
new file mode 100644
index 000000000000..a7f3c75f8ad7
--- /dev/null
+++ b/arch/x86/include/asm/cpumask.h
@@ -0,0 +1,32 @@
1#ifndef _ASM_X86_CPUMASK_H
2#define _ASM_X86_CPUMASK_H
3#ifndef __ASSEMBLY__
4#include <linux/cpumask.h>
5
6#ifdef CONFIG_X86_64
7
8extern cpumask_var_t cpu_callin_mask;
9extern cpumask_var_t cpu_callout_mask;
10extern cpumask_var_t cpu_initialized_mask;
11extern cpumask_var_t cpu_sibling_setup_mask;
12
13extern void setup_cpu_local_masks(void);
14
15#else /* CONFIG_X86_32 */
16
17extern cpumask_t cpu_callin_map;
18extern cpumask_t cpu_callout_map;
19extern cpumask_t cpu_initialized;
20extern cpumask_t cpu_sibling_setup_map;
21
22#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
23#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
24#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
25#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
26
27static inline void setup_cpu_local_masks(void) { }
28
29#endif /* CONFIG_X86_32 */
30
31#endif /* __ASSEMBLY__ */
32#endif /* _ASM_X86_CPUMASK_H */
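The new cpumask.h gives the 64-bit (dynamically allocated cpumask_var_t) and 32-bit (static cpumask_t map) flavours the same struct cpumask * face, by hiding the static maps behind casting macros. A userspace model of that trick, with invented mask contents; only the two-representation idea is taken from the header.

#include <stdio.h>

struct cpumask { unsigned long bits; };
typedef struct cpumask cpumask_t;	/* old fixed-size type */

/* Consumers only ever see struct cpumask *. */
static int mask_weight(const struct cpumask *m)
{
	return __builtin_popcountl(m->bits);
}

#ifdef DEMO_64BIT
/* 64-bit flavour: mask storage lives elsewhere, the variable is a pointer. */
typedef struct cpumask *cpumask_var_t;
static struct cpumask callin_storage;
static cpumask_var_t cpu_callin_mask = &callin_storage;
#else
/* 32-bit flavour: keep the old static map, present it as struct cpumask *. */
static cpumask_t cpu_callin_map;
#define cpu_callin_mask	((struct cpumask *)&cpu_callin_map)
#endif

int main(void)
{
	cpu_callin_mask->bits = 0x5;	/* CPUs 0 and 2 have called in */
	printf("callin weight = %d\n", mask_weight(cpu_callin_mask));
	return 0;
}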
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 0930b4f8d672..c68c361697e1 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -1,39 +1,21 @@
1#ifndef _ASM_X86_CURRENT_H 1#ifndef _ASM_X86_CURRENT_H
2#define _ASM_X86_CURRENT_H 2#define _ASM_X86_CURRENT_H
3 3
4#ifdef CONFIG_X86_32
5#include <linux/compiler.h> 4#include <linux/compiler.h>
6#include <asm/percpu.h> 5#include <asm/percpu.h>
7 6
7#ifndef __ASSEMBLY__
8struct task_struct; 8struct task_struct;
9 9
10DECLARE_PER_CPU(struct task_struct *, current_task); 10DECLARE_PER_CPU(struct task_struct *, current_task);
11static __always_inline struct task_struct *get_current(void)
12{
13 return x86_read_percpu(current_task);
14}
15
16#else /* X86_32 */
17
18#ifndef __ASSEMBLY__
19#include <asm/pda.h>
20
21struct task_struct;
22 11
23static __always_inline struct task_struct *get_current(void) 12static __always_inline struct task_struct *get_current(void)
24{ 13{
25 return read_pda(pcurrent); 14 return percpu_read(current_task);
26} 15}
27 16
28#else /* __ASSEMBLY__ */ 17#define current get_current()
29
30#include <asm/asm-offsets.h>
31#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
32 18
33#endif /* __ASSEMBLY__ */ 19#endif /* __ASSEMBLY__ */
34 20
35#endif /* X86_32 */
36
37#define current get_current()
38
39#endif /* _ASM_X86_CURRENT_H */ 21#endif /* _ASM_X86_CURRENT_H */
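After the current.h hunk, both 32-bit and 64-bit builds fetch current from the current_task per-CPU variable instead of the 64-bit PDA field. A userspace approximation using a thread-local variable where the kernel uses per-CPU data; the task_struct contents here are invented.

#include <stdio.h>

struct task_struct {
	int pid;
	const char *comm;
};

/* Thread-local stand-in for DECLARE_PER_CPU(struct task_struct *, current_task). */
static __thread struct task_struct *current_task;

static inline struct task_struct *get_current(void)
{
	return current_task;
}
#define current get_current()

int main(void)
{
	struct task_struct init_task = { .pid = 1, .comm = "init" };

	current_task = &init_task;	/* what a context switch would update */
	printf("current: pid=%d comm=%s\n", current->pid, current->comm);
	return 0;
}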
diff --git a/arch/x86/include/asm/mach-default/do_timer.h b/arch/x86/include/asm/do_timer.h
index 23ecda0b28a0..23ecda0b28a0 100644
--- a/arch/x86/include/asm/mach-default/do_timer.h
+++ b/arch/x86/include/asm/do_timer.h
diff --git a/arch/x86/include/asm/mach-default/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 6b1add8e31dd..854d538ae857 100644
--- a/arch/x86/include/asm/mach-default/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -9,12 +9,28 @@
9 * is no hardware IRQ pin equivalent for them, they are triggered 9 * is no hardware IRQ pin equivalent for them, they are triggered
10 * through the ICC by us (IPIs) 10 * through the ICC by us (IPIs)
11 */ 11 */
12#ifdef CONFIG_X86_SMP 12#ifdef CONFIG_SMP
13BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) 13BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
14BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
15BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) 14BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
16BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) 15BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
17BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) 16BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
17
18BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0,
19 smp_invalidate_interrupt)
20BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,
21 smp_invalidate_interrupt)
22BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
23 smp_invalidate_interrupt)
24BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
25 smp_invalidate_interrupt)
26BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,
27 smp_invalidate_interrupt)
28BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5,
29 smp_invalidate_interrupt)
30BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,
31 smp_invalidate_interrupt)
32BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
33 smp_invalidate_interrupt)
18#endif 34#endif
19 35
20/* 36/*
@@ -25,10 +41,15 @@ BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
25 * a much simpler SMP time architecture: 41 * a much simpler SMP time architecture:
26 */ 42 */
27#ifdef CONFIG_X86_LOCAL_APIC 43#ifdef CONFIG_X86_LOCAL_APIC
44
28BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) 45BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
29BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) 46BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
30BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) 47BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
31 48
49#ifdef CONFIG_PERF_COUNTERS
50BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR)
51#endif
52
32#ifdef CONFIG_X86_MCE_P4THERMAL 53#ifdef CONFIG_X86_MCE_P4THERMAL
33BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) 54BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
34#endif 55#endif
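The entry_arch.h change replaces the single invalidate entry point with eight BUILD_INTERRUPT3 instances, one per TLB-shootdown vector, all funnelling into smp_invalidate_interrupt. Below is a C model of that stamping pattern: a macro generates thin per-vector stubs plus a dispatch table. The vector base and the handler body are demo values, not the kernel's.

#include <stdio.h>

#define INVALIDATE_TLB_VECTOR_START	0xf0	/* demo value */

static void smp_invalidate_interrupt(int vector)
{
	printf("TLB shootdown on vector 0x%x\n", vector);
}

/* Stamp out a thin entry stub per vector, all calling the same handler. */
#define BUILD_INVALIDATE(n)						\
	static void invalidate_interrupt##n(void)			\
	{								\
		smp_invalidate_interrupt(INVALIDATE_TLB_VECTOR_START + (n)); \
	}

BUILD_INVALIDATE(0)
BUILD_INVALIDATE(1)
BUILD_INVALIDATE(2)
BUILD_INVALIDATE(3)

static void (*const invalidate_stubs[])(void) = {
	invalidate_interrupt0, invalidate_interrupt1,
	invalidate_interrupt2, invalidate_interrupt3,
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(invalidate_stubs) / sizeof(invalidate_stubs[0]); i++)
		invalidate_stubs[i]();
	return 0;
}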
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
deleted file mode 100644
index c58b9cc74465..000000000000
--- a/arch/x86/include/asm/es7000/apic.h
+++ /dev/null
@@ -1,242 +0,0 @@
1#ifndef __ASM_ES7000_APIC_H
2#define __ASM_ES7000_APIC_H
3
4#include <linux/gfp.h>
5
6#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
7#define esr_disable (1)
8
9static inline int apic_id_registered(void)
10{
11 return (1);
12}
13
14static inline const cpumask_t *target_cpus_cluster(void)
15{
16 return &CPU_MASK_ALL;
17}
18
19static inline const cpumask_t *target_cpus(void)
20{
21 return &cpumask_of_cpu(smp_processor_id());
22}
23
24#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
25#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio)
26#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */
27#define NO_BALANCE_IRQ_CLUSTER (1)
28
29#define APIC_DFR_VALUE (APIC_DFR_FLAT)
30#define INT_DELIVERY_MODE (dest_Fixed)
31#define INT_DEST_MODE (0) /* phys delivery to target procs */
32#define NO_BALANCE_IRQ (0)
33#undef APIC_DEST_LOGICAL
34#define APIC_DEST_LOGICAL 0x0
35
36static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
37{
38 return 0;
39}
40static inline unsigned long check_apicid_present(int bit)
41{
42 return physid_isset(bit, phys_cpu_present_map);
43}
44
45#define apicid_cluster(apicid) (apicid & 0xF0)
46
47static inline unsigned long calculate_ldr(int cpu)
48{
49 unsigned long id;
50 id = xapic_phys_to_log_apicid(cpu);
51 return (SET_APIC_LOGICAL_ID(id));
52}
53
54/*
55 * Set up the logical destination ID.
56 *
57 * Intel recommends to set DFR, LdR and TPR before enabling
58 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
59 * document number 292116). So here it goes...
60 */
61static inline void init_apic_ldr_cluster(void)
62{
63 unsigned long val;
64 int cpu = smp_processor_id();
65
66 apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
67 val = calculate_ldr(cpu);
68 apic_write(APIC_LDR, val);
69}
70
71static inline void init_apic_ldr(void)
72{
73 unsigned long val;
74 int cpu = smp_processor_id();
75
76 apic_write(APIC_DFR, APIC_DFR_VALUE);
77 val = calculate_ldr(cpu);
78 apic_write(APIC_LDR, val);
79}
80
81extern int apic_version [MAX_APICS];
82static inline void setup_apic_routing(void)
83{
84 int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
85 printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
86 (apic_version[apic] == 0x14) ?
87 "Physical Cluster" : "Logical Cluster",
88 nr_ioapics, cpus_addr(*target_cpus())[0]);
89}
90
91static inline int multi_timer_check(int apic, int irq)
92{
93 return 0;
94}
95
96static inline int apicid_to_node(int logical_apicid)
97{
98 return 0;
99}
100
101
102static inline int cpu_present_to_apicid(int mps_cpu)
103{
104 if (!mps_cpu)
105 return boot_cpu_physical_apicid;
106 else if (mps_cpu < nr_cpu_ids)
107 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
108 else
109 return BAD_APICID;
110}
111
112static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
113{
114 static int id = 0;
115 physid_mask_t mask;
116 mask = physid_mask_of_physid(id);
117 ++id;
118 return mask;
119}
120
121extern u8 cpu_2_logical_apicid[];
122/* Mapping from cpu number to logical apicid */
123static inline int cpu_to_logical_apicid(int cpu)
124{
125#ifdef CONFIG_SMP
126 if (cpu >= nr_cpu_ids)
127 return BAD_APICID;
128 return (int)cpu_2_logical_apicid[cpu];
129#else
130 return logical_smp_processor_id();
131#endif
132}
133
134static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
135{
136 /* For clustered we don't have a good way to do this yet - hack */
137 return physids_promote(0xff);
138}
139
140
141static inline void setup_portio_remap(void)
142{
143}
144
145extern unsigned int boot_cpu_physical_apicid;
146static inline int check_phys_apicid_present(int cpu_physical_apicid)
147{
148 boot_cpu_physical_apicid = read_apic_id();
149 return (1);
150}
151
152static inline unsigned int
153cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
154{
155 int num_bits_set;
156 int cpus_found = 0;
157 int cpu;
158 int apicid;
159
160 num_bits_set = cpumask_weight(cpumask);
161 /* Return id to all */
162 if (num_bits_set == nr_cpu_ids)
163 return 0xFF;
164 /*
165 * The cpus in the mask must all be on the apic cluster. If are not
166 * on the same apicid cluster return default value of TARGET_CPUS.
167 */
168 cpu = cpumask_first(cpumask);
169 apicid = cpu_to_logical_apicid(cpu);
170 while (cpus_found < num_bits_set) {
171 if (cpumask_test_cpu(cpu, cpumask)) {
172 int new_apicid = cpu_to_logical_apicid(cpu);
173 if (apicid_cluster(apicid) !=
174 apicid_cluster(new_apicid)){
175 printk ("%s: Not a valid mask!\n", __func__);
176 return 0xFF;
177 }
178 apicid = new_apicid;
179 cpus_found++;
180 }
181 cpu++;
182 }
183 return apicid;
184}
185
186static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
187{
188 int num_bits_set;
189 int cpus_found = 0;
190 int cpu;
191 int apicid;
192
193 num_bits_set = cpus_weight(*cpumask);
194 /* Return id to all */
195 if (num_bits_set == nr_cpu_ids)
196 return cpu_to_logical_apicid(0);
197 /*
198 * The cpus in the mask must all be on the apic cluster. If are not
199 * on the same apicid cluster return default value of TARGET_CPUS.
200 */
201 cpu = first_cpu(*cpumask);
202 apicid = cpu_to_logical_apicid(cpu);
203 while (cpus_found < num_bits_set) {
204 if (cpu_isset(cpu, *cpumask)) {
205 int new_apicid = cpu_to_logical_apicid(cpu);
206 if (apicid_cluster(apicid) !=
207 apicid_cluster(new_apicid)){
208 printk ("%s: Not a valid mask!\n", __func__);
209 return cpu_to_logical_apicid(0);
210 }
211 apicid = new_apicid;
212 cpus_found++;
213 }
214 cpu++;
215 }
216 return apicid;
217}
218
219
220static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
221 const struct cpumask *andmask)
222{
223 int apicid = cpu_to_logical_apicid(0);
224 cpumask_var_t cpumask;
225
226 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
227 return apicid;
228
229 cpumask_and(cpumask, inmask, andmask);
230 cpumask_and(cpumask, cpumask, cpu_online_mask);
231 apicid = cpu_mask_to_apicid(cpumask);
232
233 free_cpumask_var(cpumask);
234 return apicid;
235}
236
237static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
238{
239 return cpuid_apic >> index_msb;
240}
241
242#endif /* __ASM_ES7000_APIC_H */
diff --git a/arch/x86/include/asm/es7000/apicdef.h b/arch/x86/include/asm/es7000/apicdef.h
deleted file mode 100644
index 8b234a3cb851..000000000000
--- a/arch/x86/include/asm/es7000/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_ES7000_APICDEF_H
2#define __ASM_ES7000_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (((x)>>24)&0xFF);
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h
deleted file mode 100644
index 7e8ed24d4b8a..000000000000
--- a/arch/x86/include/asm/es7000/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef __ASM_ES7000_IPI_H
2#define __ASM_ES7000_IPI_H
3
4void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
5void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
6
7static inline void send_IPI_mask(const struct cpumask *mask, int vector)
8{
9 send_IPI_mask_sequence(mask, vector);
10}
11
12static inline void send_IPI_allbutself(int vector)
13{
14 send_IPI_mask_allbutself(cpu_online_mask, vector);
15}
16
17static inline void send_IPI_all(int vector)
18{
19 send_IPI_mask(cpu_online_mask, vector);
20}
21
22#endif /* __ASM_ES7000_IPI_H */
diff --git a/arch/x86/include/asm/es7000/mpparse.h b/arch/x86/include/asm/es7000/mpparse.h
deleted file mode 100644
index c1629b090ec2..000000000000
--- a/arch/x86/include/asm/es7000/mpparse.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __ASM_ES7000_MPPARSE_H
2#define __ASM_ES7000_MPPARSE_H
3
4#include <linux/acpi.h>
5
6extern int parse_unisys_oem (char *oemptr);
7extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
8extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
9extern void setup_unisys(void);
10
11#ifndef CONFIG_X86_GENERICARCH
12extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
13extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid);
14#endif
15
16#ifdef CONFIG_ACPI
17
18static inline int es7000_check_dsdt(void)
19{
20 struct acpi_table_header header;
21
22 if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
23 !strncmp(header.oem_id, "UNISYS", 6))
24 return 1;
25 return 0;
26}
27#endif
28
29#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h
deleted file mode 100644
index 78f0daaee436..000000000000
--- a/arch/x86/include/asm/es7000/wakecpu.h
+++ /dev/null
@@ -1,37 +0,0 @@
1#ifndef __ASM_ES7000_WAKECPU_H
2#define __ASM_ES7000_WAKECPU_H
3
4#define TRAMPOLINE_PHYS_LOW 0x467
5#define TRAMPOLINE_PHYS_HIGH 0x469
6
7static inline void wait_for_init_deassert(atomic_t *deassert)
8{
9#ifndef CONFIG_ES7000_CLUSTERED_APIC
10 while (!atomic_read(deassert))
11 cpu_relax();
12#endif
13 return;
14}
15
16/* Nothing to do for most platforms, since cleared by the INIT cycle */
17static inline void smp_callin_clear_local_apic(void)
18{
19}
20
21static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
22{
23}
24
25static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
26{
27}
28
29extern void __inquire_remote_apic(int apicid);
30
31static inline void inquire_remote_apic(int apicid)
32{
33 if (apic_verbosity >= APIC_DEBUG)
34 __inquire_remote_apic(apicid);
35}
36
37#endif /* __ASM_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h
index d48bee663a6f..273b99452ae0 100644
--- a/arch/x86/include/asm/genapic.h
+++ b/arch/x86/include/asm/genapic.h
@@ -1,5 +1,263 @@
1#ifndef _ASM_X86_GENAPIC_H
2#define _ASM_X86_GENAPIC_H
3
4#include <linux/cpumask.h>
5
6#include <asm/mpspec.h>
7#include <asm/atomic.h>
8
9/*
10 * Copyright 2004 James Cleverdon, IBM.
11 * Subject to the GNU Public License, v.2
12 *
13 * Generic APIC sub-arch data struct.
14 *
15 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
16 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
17 * James Cleverdon.
18 */
19struct genapic {
20 char *name;
21
22 int (*probe)(void);
23 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
24 int (*apic_id_registered)(void);
25
26 u32 irq_delivery_mode;
27 u32 irq_dest_mode;
28
29 const struct cpumask *(*target_cpus)(void);
30
31 int disable_esr;
32
33 int dest_logical;
34 unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
35 unsigned long (*check_apicid_present)(int apicid);
36
37 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
38 void (*init_apic_ldr)(void);
39
40 physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
41
42 void (*setup_apic_routing)(void);
43 int (*multi_timer_check)(int apic, int irq);
44 int (*apicid_to_node)(int logical_apicid);
45 int (*cpu_to_logical_apicid)(int cpu);
46 int (*cpu_present_to_apicid)(int mps_cpu);
47 physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
48 void (*setup_portio_remap)(void);
49 int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
50 void (*enable_apic_mode)(void);
51 int (*phys_pkg_id)(int cpuid_apic, int index_msb);
52
53 /*
54 * When one of the next two hooks returns 1 the genapic
55 * is switched to this. Essentially they are additional
56 * probe functions:
57 */
58 int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
59
60 unsigned int (*get_apic_id)(unsigned long x);
61 unsigned long (*set_apic_id)(unsigned int id);
62 unsigned long apic_id_mask;
63
64 unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
65 unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
66 const struct cpumask *andmask);
67
68 /* ipi */
69 void (*send_IPI_mask)(const struct cpumask *mask, int vector);
70 void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
71 int vector);
72 void (*send_IPI_allbutself)(int vector);
73 void (*send_IPI_all)(int vector);
74 void (*send_IPI_self)(int vector);
75
76 /* wakeup_secondary_cpu */
77 int (*wakeup_cpu)(int apicid, unsigned long start_eip);
78
79 int trampoline_phys_low;
80 int trampoline_phys_high;
81
82 void (*wait_for_init_deassert)(atomic_t *deassert);
83 void (*smp_callin_clear_local_apic)(void);
84 void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
85 void (*inquire_remote_apic)(int apicid);
86};
87
88extern struct genapic *apic;
89
90/*
91 * Warm reset vector default position:
92 */
93#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467
94#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469
95
1#ifdef CONFIG_X86_32 96#ifdef CONFIG_X86_32
2# include "genapic_32.h" 97extern void es7000_update_genapic_to_cluster(void);
3#else 98#else
4# include "genapic_64.h" 99extern struct genapic apic_flat;
100extern struct genapic apic_physflat;
101extern struct genapic apic_x2apic_cluster;
102extern struct genapic apic_x2apic_phys;
103extern int default_acpi_madt_oem_check(char *, char *);
104
105extern void apic_send_IPI_self(int vector);
106
107extern struct genapic apic_x2apic_uv_x;
108DECLARE_PER_CPU(int, x2apic_extra_bits);
109
110extern void default_setup_apic_routing(void);
111
112extern int default_cpu_present_to_apicid(int mps_cpu);
113extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
5#endif 114#endif
115
116static inline void default_wait_for_init_deassert(atomic_t *deassert)
117{
118 while (!atomic_read(deassert))
119 cpu_relax();
120 return;
121}
122
123extern void generic_bigsmp_probe(void);
124
125
126#ifdef CONFIG_X86_LOCAL_APIC
127
128#include <asm/smp.h>
129
130#define APIC_DFR_VALUE (APIC_DFR_FLAT)
131
132static inline const struct cpumask *default_target_cpus(void)
133{
134#ifdef CONFIG_SMP
135 return cpu_online_mask;
136#else
137 return cpumask_of(0);
138#endif
139}
140
141DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
142
143
144static inline unsigned int read_apic_id(void)
145{
146 unsigned int reg;
147
148 reg = apic_read(APIC_ID);
149
150 return apic->get_apic_id(reg);
151}
152
153#ifdef CONFIG_X86_64
154extern void default_setup_apic_routing(void);
155#else
156
157/*
158 * Set up the logical destination ID.
159 *
160 * Intel recommends to set DFR, LDR and TPR before enabling
161 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
162 * document number 292116). So here it goes...
163 */
164extern void default_init_apic_ldr(void);
165
166static inline int default_apic_id_registered(void)
167{
168 return physid_isset(read_apic_id(), phys_cpu_present_map);
169}
170
171static inline unsigned int
172default_cpu_mask_to_apicid(const struct cpumask *cpumask)
173{
174 return cpumask_bits(cpumask)[0];
175}
176
177static inline unsigned int
178default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
179 const struct cpumask *andmask)
180{
181 unsigned long mask1 = cpumask_bits(cpumask)[0];
182 unsigned long mask2 = cpumask_bits(andmask)[0];
183 unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
184
185 return (unsigned int)(mask1 & mask2 & mask3);
186}
187
188static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
189{
190 return cpuid_apic >> index_msb;
191}
192
193static inline void default_setup_apic_routing(void)
194{
195#ifdef CONFIG_X86_IO_APIC
196 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
197 "Flat", nr_ioapics);
198#endif
199}
200
201extern int default_apicid_to_node(int logical_apicid);
202
203#endif
204
205static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
206{
207 return physid_isset(apicid, bitmap);
208}
209
210static inline unsigned long default_check_apicid_present(int bit)
211{
212 return physid_isset(bit, phys_cpu_present_map);
213}
214
215static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
216{
217 return phys_map;
218}
219
220/* Mapping from cpu number to logical apicid */
221static inline int default_cpu_to_logical_apicid(int cpu)
222{
223 return 1 << cpu;
224}
225
226static inline int __default_cpu_present_to_apicid(int mps_cpu)
227{
228 if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
229 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
230 else
231 return BAD_APICID;
232}
233
234static inline int
235__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
236{
237 return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
238}
239
240#ifdef CONFIG_X86_32
241static inline int default_cpu_present_to_apicid(int mps_cpu)
242{
243 return __default_cpu_present_to_apicid(mps_cpu);
244}
245
246static inline int
247default_check_phys_apicid_present(int boot_cpu_physical_apicid)
248{
249 return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
250}
251#else
252extern int default_cpu_present_to_apicid(int mps_cpu);
253extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
254#endif
255
256static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
257{
258 return physid_mask_of_physid(phys_apicid);
259}
260
261#endif /* CONFIG_X86_LOCAL_APIC */
262
263#endif /* _ASM_X86_GENAPIC_64_H */
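Among the default helpers the genapic.h hunk adds, default_cpu_mask_to_apicid_and() builds the flat logical destination by ANDing the low words of the requested mask, the affinity mask and the online mask, since in flat mode each CPU's logical ID is simply 1 << cpu (see default_cpu_to_logical_apicid() above). A small numeric sketch of that calculation with invented mask values.

#include <stdio.h>

/* Flat logical mode: CPU n occupies bit n of the destination field. */
static unsigned int cpu_to_logical_apicid(int cpu)
{
	return 1u << cpu;
}

static unsigned int cpu_mask_to_apicid_and(unsigned long cpumask,
					    unsigned long andmask,
					    unsigned long online)
{
	return (unsigned int)(cpumask & andmask & online);
}

int main(void)
{
	unsigned long requested = 0x0f;	/* CPUs 0-3 */
	unsigned long affinity  = 0x0a;	/* CPUs 1 and 3 */
	unsigned long online    = 0x0b;	/* CPUs 0, 1 and 3 */

	printf("cpu 3 logical id = 0x%x\n", cpu_to_logical_apicid(3));
	printf("destination      = 0x%x\n",
	       cpu_mask_to_apicid_and(requested, affinity, online));
	return 0;
}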
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
deleted file mode 100644
index 2c05b737ee22..000000000000
--- a/arch/x86/include/asm/genapic_32.h
+++ /dev/null
@@ -1,148 +0,0 @@
1#ifndef _ASM_X86_GENAPIC_32_H
2#define _ASM_X86_GENAPIC_32_H
3
4#include <asm/mpspec.h>
5#include <asm/atomic.h>
6
7/*
8 * Generic APIC driver interface.
9 *
10 * An straight forward mapping of the APIC related parts of the
11 * x86 subarchitecture interface to a dynamic object.
12 *
13 * This is used by the "generic" x86 subarchitecture.
14 *
15 * Copyright 2003 Andi Kleen, SuSE Labs.
16 */
17
18struct mpc_bus;
19struct mpc_table;
20struct mpc_cpu;
21
22struct genapic {
23 char *name;
24 int (*probe)(void);
25
26 int (*apic_id_registered)(void);
27 const struct cpumask *(*target_cpus)(void);
28 int int_delivery_mode;
29 int int_dest_mode;
30 int ESR_DISABLE;
31 int apic_destination_logical;
32 unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
33 unsigned long (*check_apicid_present)(int apicid);
34 int no_balance_irq;
35 int no_ioapic_check;
36 void (*init_apic_ldr)(void);
37 physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
38
39 void (*setup_apic_routing)(void);
40 int (*multi_timer_check)(int apic, int irq);
41 int (*apicid_to_node)(int logical_apicid);
42 int (*cpu_to_logical_apicid)(int cpu);
43 int (*cpu_present_to_apicid)(int mps_cpu);
44 physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
45 void (*setup_portio_remap)(void);
46 int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
47 void (*enable_apic_mode)(void);
48 u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
49
50 /* mpparse */
51 /* When one of the next two hooks returns 1 the genapic
52 is switched to this. Essentially they are additional probe
53 functions. */
54 int (*mps_oem_check)(struct mpc_table *mpc, char *oem,
55 char *productid);
56 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
57
58 unsigned (*get_apic_id)(unsigned long x);
59 unsigned long apic_id_mask;
60 unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
61 unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
62 const struct cpumask *andmask);
63 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
64
65#ifdef CONFIG_SMP
66 /* ipi */
67 void (*send_IPI_mask)(const struct cpumask *mask, int vector);
68 void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
69 int vector);
70 void (*send_IPI_allbutself)(int vector);
71 void (*send_IPI_all)(int vector);
72#endif
73 int (*wakeup_cpu)(int apicid, unsigned long start_eip);
74 int trampoline_phys_low;
75 int trampoline_phys_high;
76 void (*wait_for_init_deassert)(atomic_t *deassert);
77 void (*smp_callin_clear_local_apic)(void);
78 void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
79 void (*restore_NMI_vector)(unsigned short *high, unsigned short *low);
80 void (*inquire_remote_apic)(int apicid);
81};
82
83#define APICFUNC(x) .x = x,
84
85/* More functions could be probably marked IPIFUNC and save some space
86 in UP GENERICARCH kernels, but I don't have the nerve right now
87 to untangle this mess. -AK */
88#ifdef CONFIG_SMP
89#define IPIFUNC(x) APICFUNC(x)
90#else
91#define IPIFUNC(x)
92#endif
93
94#define APIC_INIT(aname, aprobe) \
95{ \
96 .name = aname, \
97 .probe = aprobe, \
98 .int_delivery_mode = INT_DELIVERY_MODE, \
99 .int_dest_mode = INT_DEST_MODE, \
100 .no_balance_irq = NO_BALANCE_IRQ, \
101 .ESR_DISABLE = esr_disable, \
102 .apic_destination_logical = APIC_DEST_LOGICAL, \
103 APICFUNC(apic_id_registered) \
104 APICFUNC(target_cpus) \
105 APICFUNC(check_apicid_used) \
106 APICFUNC(check_apicid_present) \
107 APICFUNC(init_apic_ldr) \
108 APICFUNC(ioapic_phys_id_map) \
109 APICFUNC(setup_apic_routing) \
110 APICFUNC(multi_timer_check) \
111 APICFUNC(apicid_to_node) \
112 APICFUNC(cpu_to_logical_apicid) \
113 APICFUNC(cpu_present_to_apicid) \
114 APICFUNC(apicid_to_cpu_present) \
115 APICFUNC(setup_portio_remap) \
116 APICFUNC(check_phys_apicid_present) \
117 APICFUNC(mps_oem_check) \
118 APICFUNC(get_apic_id) \
119 .apic_id_mask = APIC_ID_MASK, \
120 APICFUNC(cpu_mask_to_apicid) \
121 APICFUNC(cpu_mask_to_apicid_and) \
122 APICFUNC(vector_allocation_domain) \
123 APICFUNC(acpi_madt_oem_check) \
124 IPIFUNC(send_IPI_mask) \
125 IPIFUNC(send_IPI_allbutself) \
126 IPIFUNC(send_IPI_all) \
127 APICFUNC(enable_apic_mode) \
128 APICFUNC(phys_pkg_id) \
129 .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, \
130 .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, \
131 APICFUNC(wait_for_init_deassert) \
132 APICFUNC(smp_callin_clear_local_apic) \
133 APICFUNC(store_NMI_vector) \
134 APICFUNC(restore_NMI_vector) \
135 APICFUNC(inquire_remote_apic) \
136}
137
138extern struct genapic *genapic;
139extern void es7000_update_genapic_to_cluster(void);
140
141enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
142#define get_uv_system_type() UV_NONE
143#define is_uv_system() 0
144#define uv_wakeup_secondary(a, b) 1
145#define uv_system_init() do {} while (0)
146
147
148#endif /* _ASM_X86_GENAPIC_32_H */
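
To make the dispatch-table idea above concrete, here is a minimal userspace sketch of the same pattern: a struct of function pointers filled in with designated initializers via a helper macro in the spirit of APICFUNC(). All names (mini_apic, MINI_APICFUNC, apic_default) are invented for illustration; this is not kernel code.

#include <stdio.h>

/* A cut-down "ops table": each sub-architecture fills one of these in. */
struct mini_apic {
	const char *name;
	int (*probe)(void);
	int (*cpu_to_apicid)(int cpu);
};

/* Per-field initializer helper, in the spirit of APICFUNC(x). */
#define MINI_APICFUNC(x) .x = x,

static int probe(void) { return 1; }			/* always matches */
static int cpu_to_apicid(int cpu) { return 1 << cpu; }	/* logical-flat style */

static struct mini_apic apic_default = {
	.name = "default",
	MINI_APICFUNC(probe)
	MINI_APICFUNC(cpu_to_apicid)
};

static struct mini_apic *genapic = &apic_default;

int main(void)
{
	if (genapic->probe())
		printf("%s: cpu 2 -> apicid 0x%x\n",
		       genapic->name, (unsigned)genapic->cpu_to_apicid(2));
	return 0;
}

The kernel-side APIC_INIT() initializer works the same way, only with many more hooks and with IPIFUNC() expanding to nothing on UP builds.
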
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
deleted file mode 100644
index adf32fb56aa6..000000000000
--- a/arch/x86/include/asm/genapic_64.h
+++ /dev/null
@@ -1,66 +0,0 @@
1#ifndef _ASM_X86_GENAPIC_64_H
2#define _ASM_X86_GENAPIC_64_H
3
4#include <linux/cpumask.h>
5
6/*
7 * Copyright 2004 James Cleverdon, IBM.
8 * Subject to the GNU Public License, v.2
9 *
10 * Generic APIC sub-arch data struct.
11 *
12 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
13 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
14 * James Cleverdon.
15 */
16
17struct genapic {
18 char *name;
19 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
20 u32 int_delivery_mode;
21 u32 int_dest_mode;
22 int (*apic_id_registered)(void);
23 const struct cpumask *(*target_cpus)(void);
24 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
25 void (*init_apic_ldr)(void);
26 /* ipi */
27 void (*send_IPI_mask)(const struct cpumask *mask, int vector);
28 void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
29 int vector);
30 void (*send_IPI_allbutself)(int vector);
31 void (*send_IPI_all)(int vector);
32 void (*send_IPI_self)(int vector);
33 /* */
34 unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
35 unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
36 const struct cpumask *andmask);
37 unsigned int (*phys_pkg_id)(int index_msb);
38 unsigned int (*get_apic_id)(unsigned long x);
39 unsigned long (*set_apic_id)(unsigned int id);
40 unsigned long apic_id_mask;
41 /* wakeup_secondary_cpu */
42 int (*wakeup_cpu)(int apicid, unsigned long start_eip);
43};
44
45extern struct genapic *genapic;
46
47extern struct genapic apic_flat;
48extern struct genapic apic_physflat;
49extern struct genapic apic_x2apic_cluster;
50extern struct genapic apic_x2apic_phys;
51extern int acpi_madt_oem_check(char *, char *);
52
53extern void apic_send_IPI_self(int vector);
54enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
55extern enum uv_system_type get_uv_system_type(void);
56extern int is_uv_system(void);
57
58extern struct genapic apic_x2apic_uv_x;
59DECLARE_PER_CPU(int, x2apic_extra_bits);
60extern void uv_cpu_init(void);
61extern void uv_system_init(void);
62extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
63
64extern void setup_apic_routing(void);
65
66#endif /* _ASM_X86_GENAPIC_64_H */
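
The comment in the 32-bit header above notes that mps_oem_check()/acpi_madt_oem_check() act as additional probe functions: whichever driver reports a match becomes the active genapic. A hypothetical, cut-down model of that selection step (driver names and probe results are made up):

#include <stddef.h>
#include <stdio.h>

/* Toy model of picking one driver out of a table by calling its probe
 * hook -- the role the probe/oem-check hooks play for the genapic
 * drivers declared above (apic_flat, apic_physflat, ...). */
struct drv { const char *name; int (*probe)(void); };

static int flat_probe(void)     { return 0; }	/* pretend: does not apply   */
static int physflat_probe(void) { return 1; }	/* pretend: this one matches */

static struct drv drivers[] = {
	{ "flat",     flat_probe     },
	{ "physflat", physflat_probe },
};

int main(void)
{
	const struct drv *chosen = NULL;

	for (size_t i = 0; i < sizeof(drivers) / sizeof(drivers[0]); i++) {
		if (drivers[i].probe()) {
			chosen = &drivers[i];
			break;
		}
	}
	printf("selected: %s\n", chosen ? chosen->name : "none");
	return 0;
}
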
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 000787df66e6..176f058e7159 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -1,11 +1,52 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_HARDIRQ_H
2# include "hardirq_32.h" 2#define _ASM_X86_HARDIRQ_H
3#else 3
4# include "hardirq_64.h" 4#include <linux/threads.h>
5#include <linux/irq.h>
6
7typedef struct {
8 unsigned int __softirq_pending;
9 unsigned int __nmi_count; /* arch dependent */
10 unsigned int irq0_irqs;
11#ifdef CONFIG_X86_LOCAL_APIC
12 unsigned int apic_timer_irqs; /* arch dependent */
13 unsigned int irq_spurious_count;
14#endif
15#ifdef CONFIG_SMP
16 unsigned int irq_resched_count;
17 unsigned int irq_call_count;
18 unsigned int irq_tlb_count;
19#endif
20#ifdef CONFIG_X86_MCE
21 unsigned int irq_thermal_count;
22# ifdef CONFIG_X86_64
23 unsigned int irq_threshold_count;
24# endif
5#endif 25#endif
26} ____cacheline_aligned irq_cpustat_t;
27
28DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
29
30/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
31#define MAX_HARDIRQS_PER_CPU NR_VECTORS
32
33#define __ARCH_IRQ_STAT
34
35#define inc_irq_stat(member) percpu_add(irq_stat.member, 1)
36
37#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
38
39#define __ARCH_SET_SOFTIRQ_PENDING
40
41#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
42#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x))
43
44extern void ack_bad_irq(unsigned int irq);
6 45
7extern u64 arch_irq_stat_cpu(unsigned int cpu); 46extern u64 arch_irq_stat_cpu(unsigned int cpu);
8#define arch_irq_stat_cpu arch_irq_stat_cpu 47#define arch_irq_stat_cpu arch_irq_stat_cpu
9 48
10extern u64 arch_irq_stat(void); 49extern u64 arch_irq_stat(void);
11#define arch_irq_stat arch_irq_stat 50#define arch_irq_stat arch_irq_stat
51
52#endif /* _ASM_X86_HARDIRQ_H */
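
The consolidated hardirq.h above keeps one irq_cpustat_t per CPU and bumps its counters through inc_irq_stat(), built on the percpu_* accessors. As a rough userspace stand-in, thread-local storage can play the role of the per-CPU area; the struct below is trimmed down and the macro names merely mirror the kernel ones:

#include <stdio.h>

/* Userspace stand-in for the per-CPU irq_cpustat_t: __thread plays the
 * role of DECLARE_PER_CPU, and the macros mirror inc_irq_stat() and the
 * softirq-pending helpers.  Illustrative only. */
struct irq_stat {
	unsigned int __softirq_pending;
	unsigned int irq0_irqs;
	unsigned int apic_timer_irqs;
};

static __thread struct irq_stat irq_stat;

#define inc_irq_stat(member)	(irq_stat.member += 1)
#define local_softirq_pending()	(irq_stat.__softirq_pending)
#define or_softirq_pending(x)	(irq_stat.__softirq_pending |= (x))

int main(void)
{
	inc_irq_stat(irq0_irqs);
	inc_irq_stat(apic_timer_irqs);
	or_softirq_pending(1u << 3);

	printf("irq0=%u timer=%u softirq pending=%#x\n",
	       irq_stat.irq0_irqs, irq_stat.apic_timer_irqs,
	       local_softirq_pending());
	return 0;
}
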
diff --git a/arch/x86/include/asm/hardirq_32.h b/arch/x86/include/asm/hardirq_32.h
deleted file mode 100644
index cf7954d1405f..000000000000
--- a/arch/x86/include/asm/hardirq_32.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef _ASM_X86_HARDIRQ_32_H
2#define _ASM_X86_HARDIRQ_32_H
3
4#include <linux/threads.h>
5#include <linux/irq.h>
6
7typedef struct {
8 unsigned int __softirq_pending;
9 unsigned long idle_timestamp;
10 unsigned int __nmi_count; /* arch dependent */
11 unsigned int apic_timer_irqs; /* arch dependent */
12 unsigned int irq0_irqs;
13 unsigned int irq_resched_count;
14 unsigned int irq_call_count;
15 unsigned int irq_tlb_count;
16 unsigned int irq_thermal_count;
17 unsigned int irq_spurious_count;
18} ____cacheline_aligned irq_cpustat_t;
19
20DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
21
22#define __ARCH_IRQ_STAT
23#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
24
25#define inc_irq_stat(member) (__get_cpu_var(irq_stat).member++)
26
27void ack_bad_irq(unsigned int irq);
28#include <linux/irq_cpustat.h>
29
30#endif /* _ASM_X86_HARDIRQ_32_H */
diff --git a/arch/x86/include/asm/hardirq_64.h b/arch/x86/include/asm/hardirq_64.h
deleted file mode 100644
index b5a6b5d56704..000000000000
--- a/arch/x86/include/asm/hardirq_64.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef _ASM_X86_HARDIRQ_64_H
2#define _ASM_X86_HARDIRQ_64_H
3
4#include <linux/threads.h>
5#include <linux/irq.h>
6#include <asm/pda.h>
7#include <asm/apic.h>
8
9/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
10#define MAX_HARDIRQS_PER_CPU NR_VECTORS
11
12#define __ARCH_IRQ_STAT 1
13
14#define inc_irq_stat(member) add_pda(member, 1)
15
16#define local_softirq_pending() read_pda(__softirq_pending)
17
18#define __ARCH_SET_SOFTIRQ_PENDING 1
19
20#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
21#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
22
23extern void ack_bad_irq(unsigned int irq);
24
25#endif /* _ASM_X86_HARDIRQ_64_H */
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 8de644b6b959..370e1c83bb49 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -25,8 +25,6 @@
25#include <asm/irq.h> 25#include <asm/irq.h>
26#include <asm/sections.h> 26#include <asm/sections.h>
27 27
28#define platform_legacy_irq(irq) ((irq) < 16)
29
30/* Interrupt handlers registered during init_IRQ */ 28/* Interrupt handlers registered during init_IRQ */
31extern void apic_timer_interrupt(void); 29extern void apic_timer_interrupt(void);
32extern void error_interrupt(void); 30extern void error_interrupt(void);
@@ -58,7 +56,7 @@ extern void make_8259A_irq(unsigned int irq);
58extern void init_8259A(int aeoi); 56extern void init_8259A(int aeoi);
59 57
60/* IOAPIC */ 58/* IOAPIC */
61#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) 59#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
62extern unsigned long io_apic_irqs; 60extern unsigned long io_apic_irqs;
63 61
64extern void init_VISWS_APIC_irqs(void); 62extern void init_VISWS_APIC_irqs(void);
@@ -67,15 +65,7 @@ extern void disable_IO_APIC(void);
67extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); 65extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
68extern void setup_ioapic_dest(void); 66extern void setup_ioapic_dest(void);
69 67
70#ifdef CONFIG_X86_64
71extern void enable_IO_APIC(void); 68extern void enable_IO_APIC(void);
72#endif
73
74/* IPI functions */
75#ifdef CONFIG_X86_32
76extern void send_IPI_self(int vector);
77#endif
78extern void send_IPI(int dest, int vector);
79 69
80/* Statistics */ 70/* Statistics */
81extern atomic_t irq_err_count; 71extern atomic_t irq_err_count;
@@ -84,21 +74,11 @@ extern atomic_t irq_mis_count;
84/* EISA */ 74/* EISA */
85extern void eisa_set_level_irq(unsigned int irq); 75extern void eisa_set_level_irq(unsigned int irq);
86 76
87/* Voyager functions */
88extern asmlinkage void vic_cpi_interrupt(void);
89extern asmlinkage void vic_sys_interrupt(void);
90extern asmlinkage void vic_cmn_interrupt(void);
91extern asmlinkage void qic_timer_interrupt(void);
92extern asmlinkage void qic_invalidate_interrupt(void);
93extern asmlinkage void qic_reschedule_interrupt(void);
94extern asmlinkage void qic_enable_irq_interrupt(void);
95extern asmlinkage void qic_call_function_interrupt(void);
96
97/* SMP */ 77/* SMP */
98extern void smp_apic_timer_interrupt(struct pt_regs *); 78extern void smp_apic_timer_interrupt(struct pt_regs *);
99extern void smp_spurious_interrupt(struct pt_regs *); 79extern void smp_spurious_interrupt(struct pt_regs *);
100extern void smp_error_interrupt(struct pt_regs *); 80extern void smp_error_interrupt(struct pt_regs *);
101#ifdef CONFIG_X86_SMP 81#ifdef CONFIG_SMP
102extern void smp_reschedule_interrupt(struct pt_regs *); 82extern void smp_reschedule_interrupt(struct pt_regs *);
103extern void smp_call_function_interrupt(struct pt_regs *); 83extern void smp_call_function_interrupt(struct pt_regs *);
104extern void smp_call_function_single_interrupt(struct pt_regs *); 84extern void smp_call_function_single_interrupt(struct pt_regs *);
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 1dbbdf4be9b4..bcf7ea4e1367 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -91,7 +91,7 @@ extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
91 91
92extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, 92extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
93 unsigned long prot_val); 93 unsigned long prot_val);
94extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); 94extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
95 95
96/* 96/*
97 * early_ioremap() and early_iounmap() are for temporary early boot-time 97 * early_ioremap() and early_iounmap() are for temporary early boot-time
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 7a1f44ac1f17..59cb4a1317b7 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -114,38 +114,16 @@ struct IR_IO_APIC_route_entry {
114extern int nr_ioapics; 114extern int nr_ioapics;
115extern int nr_ioapic_registers[MAX_IO_APICS]; 115extern int nr_ioapic_registers[MAX_IO_APICS];
116 116
117/*
118 * MP-BIOS irq configuration table structures:
119 */
120
121#define MP_MAX_IOAPIC_PIN 127 117#define MP_MAX_IOAPIC_PIN 127
122 118
123struct mp_config_ioapic {
124 unsigned long mp_apicaddr;
125 unsigned int mp_apicid;
126 unsigned char mp_type;
127 unsigned char mp_apicver;
128 unsigned char mp_flags;
129};
130
131struct mp_config_intsrc {
132 unsigned int mp_dstapic;
133 unsigned char mp_type;
134 unsigned char mp_irqtype;
135 unsigned short mp_irqflag;
136 unsigned char mp_srcbus;
137 unsigned char mp_srcbusirq;
138 unsigned char mp_dstirq;
139};
140
141/* I/O APIC entries */ 119/* I/O APIC entries */
142extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS]; 120extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
143 121
144/* # of MP IRQ source entries */ 122/* # of MP IRQ source entries */
145extern int mp_irq_entries; 123extern int mp_irq_entries;
146 124
147/* MP IRQ source entries */ 125/* MP IRQ source entries */
148extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; 126extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
149 127
150/* non-0 if default (table-less) MP configuration */ 128/* non-0 if default (table-less) MP configuration */
151extern int mpc_default_type; 129extern int mpc_default_type;
@@ -165,15 +143,6 @@ extern int noioapicreroute;
165/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ 143/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
166extern int timer_through_8259; 144extern int timer_through_8259;
167 145
168static inline void disable_ioapic_setup(void)
169{
170#ifdef CONFIG_PCI
171 noioapicquirk = 1;
172 noioapicreroute = -1;
173#endif
174 skip_ioapic_setup = 1;
175}
176
177/* 146/*
178 * If we use the IO-APIC for IRQ routing, disable automatic 147 * If we use the IO-APIC for IRQ routing, disable automatic
179 * assignment of PCI IRQ's. 148 * assignment of PCI IRQ's.
@@ -200,6 +169,12 @@ extern void reinit_intr_remapped_IO_APIC(int);
200 169
201extern void probe_nr_irqs_gsi(void); 170extern void probe_nr_irqs_gsi(void);
202 171
172extern int setup_ioapic_entry(int apic, int irq,
173 struct IO_APIC_route_entry *entry,
174 unsigned int destination, int trigger,
175 int polarity, int vector);
176extern void ioapic_write_entry(int apic, int pin,
177 struct IO_APIC_route_entry e);
203#else /* !CONFIG_X86_IO_APIC */ 178#else /* !CONFIG_X86_IO_APIC */
204#define io_apic_assign_pci_irqs 0 179#define io_apic_assign_pci_irqs 0
205static const int timer_through_8259 = 0; 180static const int timer_through_8259 = 0;
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index c745a306f7d3..5f2efc5d9927 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_X86_IPI_H 1#ifndef _ASM_X86_IPI_H
2#define _ASM_X86_IPI_H 2#define _ASM_X86_IPI_H
3 3
4#ifdef CONFIG_X86_LOCAL_APIC
5
4/* 6/*
5 * Copyright 2004 James Cleverdon, IBM. 7 * Copyright 2004 James Cleverdon, IBM.
6 * Subject to the GNU Public License, v.2 8 * Subject to the GNU Public License, v.2
@@ -55,8 +57,8 @@ static inline void __xapic_wait_icr_idle(void)
55 cpu_relax(); 57 cpu_relax();
56} 58}
57 59
58static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, 60static inline void
59 unsigned int dest) 61__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
60{ 62{
61 /* 63 /*
62 * Subtle. In the case of the 'never do double writes' workaround 64 * Subtle. In the case of the 'never do double writes' workaround
@@ -87,8 +89,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
87 * This is used to send an IPI with no shorthand notation (the destination is 89 * This is used to send an IPI with no shorthand notation (the destination is
88 * specified in bits 56 to 63 of the ICR). 90 * specified in bits 56 to 63 of the ICR).
89 */ 91 */
90static inline void __send_IPI_dest_field(unsigned int mask, int vector, 92static inline void
91 unsigned int dest) 93 __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
92{ 94{
93 unsigned long cfg; 95 unsigned long cfg;
94 96
@@ -117,41 +119,46 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
117 native_apic_mem_write(APIC_ICR, cfg); 119 native_apic_mem_write(APIC_ICR, cfg);
118} 120}
119 121
120static inline void send_IPI_mask_sequence(const struct cpumask *mask, 122extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
121 int vector) 123 int vector);
122{ 124extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
123 unsigned long flags; 125 int vector);
124 unsigned long query_cpu; 126#include <asm/genapic.h>
125 127
126 /* 128extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
127 * Hack. The clustered APIC addressing mode doesn't allow us to send 129 int vector);
128 * to an arbitrary mask, so I do a unicast to each CPU instead. 130extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
129 * - mbligh 131 int vector);
130 */ 132
131 local_irq_save(flags); 133/* Avoid include hell */
132 for_each_cpu(query_cpu, mask) { 134#define NMI_VECTOR 0x02
133 __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), 135
134 vector, APIC_DEST_PHYSICAL); 136extern int no_broadcast;
135 } 137
136 local_irq_restore(flags); 138static inline void __default_local_send_IPI_allbutself(int vector)
139{
140 if (no_broadcast || vector == NMI_VECTOR)
141 apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
142 else
143 __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
137} 144}
138 145
139static inline void send_IPI_mask_allbutself(const struct cpumask *mask, 146static inline void __default_local_send_IPI_all(int vector)
140 int vector)
141{ 147{
142 unsigned long flags; 148 if (no_broadcast || vector == NMI_VECTOR)
143 unsigned int query_cpu; 149 apic->send_IPI_mask(cpu_online_mask, vector);
144 unsigned int this_cpu = smp_processor_id(); 150 else
145 151 __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
146 /* See Hack comment above */
147
148 local_irq_save(flags);
149 for_each_cpu(query_cpu, mask)
150 if (query_cpu != this_cpu)
151 __send_IPI_dest_field(
152 per_cpu(x86_cpu_to_apicid, query_cpu),
153 vector, APIC_DEST_PHYSICAL);
154 local_irq_restore(flags);
155} 152}
156 153
154#ifdef CONFIG_X86_32
155extern void default_send_IPI_mask_logical(const struct cpumask *mask,
156 int vector);
157extern void default_send_IPI_allbutself(int vector);
158extern void default_send_IPI_all(int vector);
159extern void default_send_IPI_self(int vector);
160#endif
161
162#endif
163
157#endif /* _ASM_X86_IPI_H */ 164#endif /* _ASM_X86_IPI_H */
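
__default_local_send_IPI_allbutself()/_all() above choose between the ICR broadcast shortcut and a per-CPU unicast walk: the shortcut is used only when broadcast is allowed and the vector is not an NMI. The sketch below models just that branch in userspace; the two send_* back-ends are stubs, not the kernel's implementations:

#include <stdio.h>

#define NMI_VECTOR 0x02

static void send_mask_allbutself(int vector)
{
	printf("unicast to every online CPU but self, vector %#x\n", vector);
}

static void send_shortcut_allbutself(int vector)
{
	printf("ICR 'all-but-self' broadcast shortcut, vector %#x\n", vector);
}

static int no_broadcast;	/* set when broadcast IPIs must be avoided */

/* Mirrors the decision in __default_local_send_IPI_allbutself():
 * broadcast only when it is both allowed and not an NMI. */
static void local_send_IPI_allbutself(int vector)
{
	if (no_broadcast || vector == NMI_VECTOR)
		send_mask_allbutself(vector);
	else
		send_shortcut_allbutself(vector);
}

int main(void)
{
	local_send_IPI_allbutself(0xfd);	/* e.g. a reschedule vector */
	local_send_IPI_allbutself(NMI_VECTOR);	/* NMIs never broadcast     */
	return 0;
}
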
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 592688ed04d3..107eb2196691 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -36,9 +36,11 @@ static inline int irq_canonicalize(int irq)
36extern void fixup_irqs(void); 36extern void fixup_irqs(void);
37#endif 37#endif
38 38
39extern unsigned int do_IRQ(struct pt_regs *regs);
40extern void init_IRQ(void); 39extern void init_IRQ(void);
41extern void native_init_IRQ(void); 40extern void native_init_IRQ(void);
41extern bool handle_irq(unsigned irq, struct pt_regs *regs);
42
43extern unsigned int do_IRQ(struct pt_regs *regs);
42 44
43/* Interrupt vector management */ 45/* Interrupt vector management */
44extern DECLARE_BITMAP(used_vectors, NR_VECTORS); 46extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
diff --git a/arch/x86/include/asm/irq_regs.h b/arch/x86/include/asm/irq_regs.h
index 89c898ab298b..77843225b7ea 100644
--- a/arch/x86/include/asm/irq_regs.h
+++ b/arch/x86/include/asm/irq_regs.h
@@ -1,5 +1,31 @@
1#ifdef CONFIG_X86_32 1/*
2# include "irq_regs_32.h" 2 * Per-cpu current frame pointer - the location of the last exception frame on
3#else 3 * the stack, stored in the per-cpu area.
4# include "irq_regs_64.h" 4 *
5#endif 5 * Jeremy Fitzhardinge <jeremy@goop.org>
6 */
7#ifndef _ASM_X86_IRQ_REGS_H
8#define _ASM_X86_IRQ_REGS_H
9
10#include <asm/percpu.h>
11
12#define ARCH_HAS_OWN_IRQ_REGS
13
14DECLARE_PER_CPU(struct pt_regs *, irq_regs);
15
16static inline struct pt_regs *get_irq_regs(void)
17{
18 return percpu_read(irq_regs);
19}
20
21static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
22{
23 struct pt_regs *old_regs;
24
25 old_regs = get_irq_regs();
26 percpu_write(irq_regs, new_regs);
27
28 return old_regs;
29}
30
 31#endif /* _ASM_X86_IRQ_REGS_H */
diff --git a/arch/x86/include/asm/irq_regs_32.h b/arch/x86/include/asm/irq_regs_32.h
deleted file mode 100644
index 86afd7473457..000000000000
--- a/arch/x86/include/asm/irq_regs_32.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Per-cpu current frame pointer - the location of the last exception frame on
3 * the stack, stored in the per-cpu area.
4 *
5 * Jeremy Fitzhardinge <jeremy@goop.org>
6 */
7#ifndef _ASM_X86_IRQ_REGS_32_H
8#define _ASM_X86_IRQ_REGS_32_H
9
10#include <asm/percpu.h>
11
12#define ARCH_HAS_OWN_IRQ_REGS
13
14DECLARE_PER_CPU(struct pt_regs *, irq_regs);
15
16static inline struct pt_regs *get_irq_regs(void)
17{
18 return x86_read_percpu(irq_regs);
19}
20
21static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
22{
23 struct pt_regs *old_regs;
24
25 old_regs = get_irq_regs();
26 x86_write_percpu(irq_regs, new_regs);
27
28 return old_regs;
29}
30
31#endif /* _ASM_X86_IRQ_REGS_32_H */
diff --git a/arch/x86/include/asm/irq_regs_64.h b/arch/x86/include/asm/irq_regs_64.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/x86/include/asm/irq_regs_64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/irq_regs.h>
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index f7ff65032b9d..b07278c55e9e 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -1,47 +1,69 @@
1#ifndef _ASM_X86_IRQ_VECTORS_H 1#ifndef _ASM_X86_IRQ_VECTORS_H
2#define _ASM_X86_IRQ_VECTORS_H 2#define _ASM_X86_IRQ_VECTORS_H
3 3
4#include <linux/threads.h> 4/*
5 * Linux IRQ vector layout.
6 *
7 * There are 256 IDT entries (per CPU - each entry is 8 bytes) which can
8 * be defined by Linux. They are used as a jump table by the CPU when a
9 * given vector is triggered - by a CPU-external, CPU-internal or
10 * software-triggered event.
11 *
12 * Linux sets the kernel code address each entry jumps to early during
13 * bootup, and never changes them. This is the general layout of the
14 * IDT entries:
15 *
16 * Vectors 0 ... 31 : system traps and exceptions - hardcoded events
17 * Vectors 32 ... 127 : device interrupts
18 * Vector 128 : legacy int80 syscall interface
19 * Vectors 129 ... 237 : device interrupts
20 * Vectors 238 ... 255 : special interrupts
21 *
22 * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
23 *
24 * This file enumerates the exact layout of them:
25 */
5 26
6#define NMI_VECTOR 0x02 27#define NMI_VECTOR 0x02
7 28
8/* 29/*
9 * IDT vectors usable for external interrupt sources start 30 * IDT vectors usable for external interrupt sources start
10 * at 0x20: 31 * at 0x20:
11 */ 32 */
12#define FIRST_EXTERNAL_VECTOR 0x20 33#define FIRST_EXTERNAL_VECTOR 0x20
13 34
14#ifdef CONFIG_X86_32 35#ifdef CONFIG_X86_32
15# define SYSCALL_VECTOR 0x80 36# define SYSCALL_VECTOR 0x80
16#else 37#else
17# define IA32_SYSCALL_VECTOR 0x80 38# define IA32_SYSCALL_VECTOR 0x80
18#endif 39#endif
19 40
20/* 41/*
21 * Reserve the lowest usable priority level 0x20 - 0x2f for triggering 42 * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
22 * cleanup after irq migration. 43 * cleanup after irq migration.
23 */ 44 */
24#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR 45#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
25 46
26/* 47/*
27 * Vectors 0x30-0x3f are used for ISA interrupts. 48 * Vectors 0x30-0x3f are used for ISA interrupts.
28 */ 49 */
29#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10) 50#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
30#define IRQ1_VECTOR (IRQ0_VECTOR + 1) 51
31#define IRQ2_VECTOR (IRQ0_VECTOR + 2) 52#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
32#define IRQ3_VECTOR (IRQ0_VECTOR + 3) 53#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
33#define IRQ4_VECTOR (IRQ0_VECTOR + 4) 54#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
34#define IRQ5_VECTOR (IRQ0_VECTOR + 5) 55#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
35#define IRQ6_VECTOR (IRQ0_VECTOR + 6) 56#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
36#define IRQ7_VECTOR (IRQ0_VECTOR + 7) 57#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
37#define IRQ8_VECTOR (IRQ0_VECTOR + 8) 58#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
38#define IRQ9_VECTOR (IRQ0_VECTOR + 9) 59#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
39#define IRQ10_VECTOR (IRQ0_VECTOR + 10) 60#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
40#define IRQ11_VECTOR (IRQ0_VECTOR + 11) 61#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
41#define IRQ12_VECTOR (IRQ0_VECTOR + 12) 62#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
42#define IRQ13_VECTOR (IRQ0_VECTOR + 13) 63#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
43#define IRQ14_VECTOR (IRQ0_VECTOR + 14) 64#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
44#define IRQ15_VECTOR (IRQ0_VECTOR + 15) 65#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
66#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
45 67
46/* 68/*
47 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff 69 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
@@ -49,119 +71,98 @@
49 * some of the following vectors are 'rare', they are merged 71 * some of the following vectors are 'rare', they are merged
50 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. 72 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
51 * TLB, reschedule and local APIC vectors are performance-critical. 73 * TLB, reschedule and local APIC vectors are performance-critical.
52 *
53 * Vectors 0xf0-0xfa are free (reserved for future Linux use).
54 */ 74 */
55#ifdef CONFIG_X86_32
56
57# define SPURIOUS_APIC_VECTOR 0xff
58# define ERROR_APIC_VECTOR 0xfe
59# define INVALIDATE_TLB_VECTOR 0xfd
60# define RESCHEDULE_VECTOR 0xfc
61# define CALL_FUNCTION_VECTOR 0xfb
62# define CALL_FUNCTION_SINGLE_VECTOR 0xfa
63# define THERMAL_APIC_VECTOR 0xf0
64
65#else
66 75
67#define SPURIOUS_APIC_VECTOR 0xff 76#define SPURIOUS_APIC_VECTOR 0xff
77/*
78 * Sanity check
79 */
80#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
81# error SPURIOUS_APIC_VECTOR definition error
82#endif
83
68#define ERROR_APIC_VECTOR 0xfe 84#define ERROR_APIC_VECTOR 0xfe
69#define RESCHEDULE_VECTOR 0xfd 85#define RESCHEDULE_VECTOR 0xfd
70#define CALL_FUNCTION_VECTOR 0xfc 86#define CALL_FUNCTION_VECTOR 0xfc
71#define CALL_FUNCTION_SINGLE_VECTOR 0xfb 87#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
72#define THERMAL_APIC_VECTOR 0xfa 88#define THERMAL_APIC_VECTOR 0xfa
73#define THRESHOLD_APIC_VECTOR 0xf9
74#define UV_BAU_MESSAGE 0xf8
75#define INVALIDATE_TLB_VECTOR_END 0xf7
76#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
77
78#define NUM_INVALIDATE_TLB_VECTORS 8
79 89
90#ifdef CONFIG_X86_32
91/* 0xf8 - 0xf9 : free */
92#else
93# define THRESHOLD_APIC_VECTOR 0xf9
94# define UV_BAU_MESSAGE 0xf8
80#endif 95#endif
81 96
97/* f0-f7 used for spreading out TLB flushes: */
98#define INVALIDATE_TLB_VECTOR_END 0xf7
99#define INVALIDATE_TLB_VECTOR_START 0xf0
100#define NUM_INVALIDATE_TLB_VECTORS 8
101
82/* 102/*
83 * Local APIC timer IRQ vector is on a different priority level, 103 * Local APIC timer IRQ vector is on a different priority level,
84 * to work around the 'lost local interrupt if more than 2 IRQ 104 * to work around the 'lost local interrupt if more than 2 IRQ
85 * sources per level' errata. 105 * sources per level' errata.
86 */ 106 */
87#define LOCAL_TIMER_VECTOR 0xef 107#define LOCAL_TIMER_VECTOR 0xef
108
109/*
110 * Performance monitoring interrupt vector:
111 */
112#define LOCAL_PERF_VECTOR 0xee
88 113
89/* 114/*
90 * First APIC vector available to drivers: (vectors 0x30-0xee) we 115 * First APIC vector available to drivers: (vectors 0x30-0xee) we
91 * start at 0x31(0x41) to spread out vectors evenly between priority 116 * start at 0x31(0x41) to spread out vectors evenly between priority
92 * levels. (0x80 is the syscall vector) 117 * levels. (0x80 is the syscall vector)
93 */ 118 */
94#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2) 119#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
95
96#define NR_VECTORS 256
97 120
98#define FPU_IRQ 13 121#define NR_VECTORS 256
99 122
100#define FIRST_VM86_IRQ 3 123#define FPU_IRQ 13
101#define LAST_VM86_IRQ 15
102#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
103 124
104#define NR_IRQS_LEGACY 16 125#define FIRST_VM86_IRQ 3
126#define LAST_VM86_IRQ 15
105 127
106#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER) 128#ifndef __ASSEMBLY__
107 129static inline int invalid_vm86_irq(int irq)
108#ifndef CONFIG_SPARSE_IRQ 130{
109# if NR_CPUS < MAX_IO_APICS 131 return irq < 3 || irq > 15;
110# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) 132}
111# else
112# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
113# endif
114#else
115# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
116# define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
117# else
118# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
119# endif
120#endif 133#endif
121 134
122#elif defined(CONFIG_X86_VOYAGER) 135/*
123 136 * Size the maximum number of interrupts.
124# define NR_IRQS 224 137 *
138 * If the irq_desc[] array has a sparse layout, we can size things
139 * generously - it scales up linearly with the maximum number of CPUs,
140 * and the maximum number of IO-APICs, whichever is higher.
141 *
142 * In other cases we size more conservatively, to not create too large
143 * static arrays.
144 */
125 145
126#else /* IO_APIC || VOYAGER */ 146#define NR_IRQS_LEGACY 16
127 147
128# define NR_IRQS 16 148#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS )
149#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS )
129 150
151#ifdef CONFIG_X86_IO_APIC
152# ifdef CONFIG_SPARSE_IRQ
153# define NR_IRQS \
154 (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
155 (NR_VECTORS + CPU_VECTOR_LIMIT) : \
156 (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
157# else
158# if NR_CPUS < MAX_IO_APICS
159# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT)
160# else
161# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
162# endif
163# endif
164#else /* !CONFIG_X86_IO_APIC: */
165# define NR_IRQS NR_IRQS_LEGACY
130#endif 166#endif
131 167
132/* Voyager specific defines */
133/* These define the CPIs we use in linux */
134#define VIC_CPI_LEVEL0 0
135#define VIC_CPI_LEVEL1 1
136/* now the fake CPIs */
137#define VIC_TIMER_CPI 2
138#define VIC_INVALIDATE_CPI 3
139#define VIC_RESCHEDULE_CPI 4
140#define VIC_ENABLE_IRQ_CPI 5
141#define VIC_CALL_FUNCTION_CPI 6
142#define VIC_CALL_FUNCTION_SINGLE_CPI 7
143
144/* Now the QIC CPIs: Since we don't need the two initial levels,
145 * these are 2 less than the VIC CPIs */
146#define QIC_CPI_OFFSET 1
147#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
148#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
149#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
150#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
151#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
152#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
153
154#define VIC_START_FAKE_CPI VIC_TIMER_CPI
155#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
156
157/* this is the SYS_INT CPI. */
158#define VIC_SYS_INT 8
159#define VIC_CMN_INT 15
160
161/* This is the boot CPI for alternate processors. It gets overwritten
162 * by the above once the system has activated all available processors */
163#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
164#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
165
166
167#endif /* _ASM_X86_IRQ_VECTORS_H */ 168#endif /* _ASM_X86_IRQ_VECTORS_H */
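
The NR_IRQS sizing above boils down to simple arithmetic: with a sparse irq_desc[] the limit is NR_VECTORS plus the larger of 8*NR_CPUS and 32*MAX_IO_APICS. The runnable snippet below reproduces that calculation for two assumed configurations (the NR_CPUS/MAX_IO_APICS values are examples, not taken from any Kconfig default):

#include <stdio.h>

#define NR_VECTORS 256

/* Sparse-IRQ case: NR_VECTORS plus whichever vector limit is larger. */
static int nr_irqs_sparse(int nr_cpus, int max_io_apics)
{
	int cpu_limit     = 8 * nr_cpus;
	int io_apic_limit = 32 * max_io_apics;

	return NR_VECTORS + (cpu_limit > io_apic_limit ? cpu_limit
							: io_apic_limit);
}

int main(void)
{
	printf("NR_CPUS=8,    MAX_IO_APICS=64 -> NR_IRQS=%d\n",
	       nr_irqs_sparse(8, 64));
	printf("NR_CPUS=4096, MAX_IO_APICS=64 -> NR_IRQS=%d\n",
	       nr_irqs_sparse(4096, 64));
	return 0;
}
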
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
deleted file mode 100644
index cc09cbbee27e..000000000000
--- a/arch/x86/include/asm/mach-default/mach_apic.h
+++ /dev/null
@@ -1,168 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_APIC_H
2#define _ASM_X86_MACH_DEFAULT_MACH_APIC_H
3
4#ifdef CONFIG_X86_LOCAL_APIC
5
6#include <mach_apicdef.h>
7#include <asm/smp.h>
8
9#define APIC_DFR_VALUE (APIC_DFR_FLAT)
10
11static inline const struct cpumask *target_cpus(void)
12{
13#ifdef CONFIG_SMP
14 return cpu_online_mask;
15#else
16 return cpumask_of(0);
17#endif
18}
19
20#define NO_BALANCE_IRQ (0)
21#define esr_disable (0)
22
23#ifdef CONFIG_X86_64
24#include <asm/genapic.h>
25#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
26#define INT_DEST_MODE (genapic->int_dest_mode)
27#define TARGET_CPUS (genapic->target_cpus())
28#define apic_id_registered (genapic->apic_id_registered)
29#define init_apic_ldr (genapic->init_apic_ldr)
30#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
31#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
32#define phys_pkg_id (genapic->phys_pkg_id)
33#define vector_allocation_domain (genapic->vector_allocation_domain)
34#define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
35#define send_IPI_self (genapic->send_IPI_self)
36#define wakeup_secondary_cpu (genapic->wakeup_cpu)
37extern void setup_apic_routing(void);
38#else
39#define INT_DELIVERY_MODE dest_LowestPrio
40#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
41#define TARGET_CPUS (target_cpus())
42#define wakeup_secondary_cpu wakeup_secondary_cpu_via_init
43/*
44 * Set up the logical destination ID.
45 *
46 * Intel recommends to set DFR, LDR and TPR before enabling
47 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
48 * document number 292116). So here it goes...
49 */
50static inline void init_apic_ldr(void)
51{
52 unsigned long val;
53
54 apic_write(APIC_DFR, APIC_DFR_VALUE);
55 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
56 val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
57 apic_write(APIC_LDR, val);
58}
59
60static inline int apic_id_registered(void)
61{
62 return physid_isset(read_apic_id(), phys_cpu_present_map);
63}
64
65static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
66{
67 return cpumask_bits(cpumask)[0];
68}
69
70static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
71 const struct cpumask *andmask)
72{
73 unsigned long mask1 = cpumask_bits(cpumask)[0];
74 unsigned long mask2 = cpumask_bits(andmask)[0];
75 unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
76
77 return (unsigned int)(mask1 & mask2 & mask3);
78}
79
80static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
81{
82 return cpuid_apic >> index_msb;
83}
84
85static inline void setup_apic_routing(void)
86{
87#ifdef CONFIG_X86_IO_APIC
88 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
89 "Flat", nr_ioapics);
90#endif
91}
92
93static inline int apicid_to_node(int logical_apicid)
94{
95#ifdef CONFIG_SMP
96 return apicid_2_node[hard_smp_processor_id()];
97#else
98 return 0;
99#endif
100}
101
102static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
103{
104 /* Careful. Some cpus do not strictly honor the set of cpus
105 * specified in the interrupt destination when using lowest
106 * priority interrupt delivery mode.
107 *
108 * In particular there was a hyperthreading cpu observed to
109 * deliver interrupts to the wrong hyperthread when only one
 110 * hyperthread was specified in the interrupt destination.
111 */
112 *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
113}
114#endif
115
116static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
117{
118 return physid_isset(apicid, bitmap);
119}
120
121static inline unsigned long check_apicid_present(int bit)
122{
123 return physid_isset(bit, phys_cpu_present_map);
124}
125
126static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
127{
128 return phys_map;
129}
130
131static inline int multi_timer_check(int apic, int irq)
132{
133 return 0;
134}
135
136/* Mapping from cpu number to logical apicid */
137static inline int cpu_to_logical_apicid(int cpu)
138{
139 return 1 << cpu;
140}
141
142static inline int cpu_present_to_apicid(int mps_cpu)
143{
144 if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
145 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
146 else
147 return BAD_APICID;
148}
149
150static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
151{
152 return physid_mask_of_physid(phys_apicid);
153}
154
155static inline void setup_portio_remap(void)
156{
157}
158
159static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
160{
161 return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
162}
163
164static inline void enable_apic_mode(void)
165{
166}
167#endif /* CONFIG_X86_LOCAL_APIC */
168#endif /* _ASM_X86_MACH_DEFAULT_MACH_APIC_H */
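
In the flat/default mode removed above, logical APIC IDs are one bit per CPU, so cpu_mask_to_apicid_and() is just the intersection of the requested mask, the affinity mask and the online mask, truncated to one word. A self-contained illustration with made-up mask values:

#include <stdio.h>

/* Word-sized model of the flat cpu_mask_to_apicid_and(): in logical-flat
 * mode each CPU is one bit, so the destination value is simply the AND
 * of the three masks.  Purely illustrative. */
static unsigned int cpu_mask_to_apicid_and(unsigned long cpumask,
					   unsigned long andmask,
					   unsigned long online_mask)
{
	return (unsigned int)(cpumask & andmask & online_mask);
}

int main(void)
{
	unsigned long requested = 0x0f;	/* CPUs 0-3            */
	unsigned long affinity  = 0x06;	/* CPUs 1-2            */
	unsigned long online    = 0x0d;	/* CPUs 0, 2, 3 online */

	printf("logical destination: %#x\n",
	       cpu_mask_to_apicid_and(requested, affinity, online));
	return 0;
}
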
diff --git a/arch/x86/include/asm/mach-default/mach_apicdef.h b/arch/x86/include/asm/mach-default/mach_apicdef.h
deleted file mode 100644
index 53179936d6c6..000000000000
--- a/arch/x86/include/asm/mach-default/mach_apicdef.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
2#define _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
3
4#include <asm/apic.h>
5
6#ifdef CONFIG_X86_64
7#define APIC_ID_MASK (genapic->apic_id_mask)
8#define GET_APIC_ID(x) (genapic->get_apic_id(x))
9#define SET_APIC_ID(x) (genapic->set_apic_id(x))
10#else
11#define APIC_ID_MASK (0xF<<24)
12static inline unsigned get_apic_id(unsigned long x)
13{
14 unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
15 if (APIC_XAPIC(ver))
16 return (((x)>>24)&0xFF);
17 else
18 return (((x)>>24)&0xF);
19}
20
21#define GET_APIC_ID(x) get_apic_id(x)
22#endif
23
24#endif /* _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H */
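
The 32-bit get_apic_id() above shifts the APIC_ID register right by 24 and keeps 8 bits on xAPIC-capable parts but only 4 bits on older ones. The same bit manipulation, runnable in userspace with an invented register value:

#include <stdio.h>

/* Extract the APIC ID from bits 31:24 of an APIC_ID register image,
 * 8 bits wide on xAPIC, 4 bits otherwise.  Register value is made up. */
static unsigned int get_apic_id(unsigned long apic_id_reg, int is_xapic)
{
	return (apic_id_reg >> 24) & (is_xapic ? 0xFF : 0x0F);
}

int main(void)
{
	unsigned long reg = 0x17000000;	/* example APIC_ID register value */

	printf("xAPIC id:          %#x\n", get_apic_id(reg, 1));	/* 0x17 */
	printf("82489DX-style id:  %#x\n", get_apic_id(reg, 0));	/* 0x7  */
	return 0;
}
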
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h
deleted file mode 100644
index 191312d155da..000000000000
--- a/arch/x86/include/asm/mach-default/mach_ipi.h
+++ /dev/null
@@ -1,64 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_IPI_H
2#define _ASM_X86_MACH_DEFAULT_MACH_IPI_H
3
4/* Avoid include hell */
5#define NMI_VECTOR 0x02
6
7void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
8void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
9void __send_IPI_shortcut(unsigned int shortcut, int vector);
10
11extern int no_broadcast;
12
13#ifdef CONFIG_X86_64
14#include <asm/genapic.h>
15#define send_IPI_mask (genapic->send_IPI_mask)
16#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
17#else
18static inline void send_IPI_mask(const struct cpumask *mask, int vector)
19{
20 send_IPI_mask_bitmask(mask, vector);
21}
22void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
23#endif
24
25static inline void __local_send_IPI_allbutself(int vector)
26{
27 if (no_broadcast || vector == NMI_VECTOR)
28 send_IPI_mask_allbutself(cpu_online_mask, vector);
29 else
30 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
31}
32
33static inline void __local_send_IPI_all(int vector)
34{
35 if (no_broadcast || vector == NMI_VECTOR)
36 send_IPI_mask(cpu_online_mask, vector);
37 else
38 __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
39}
40
41#ifdef CONFIG_X86_64
42#define send_IPI_allbutself (genapic->send_IPI_allbutself)
43#define send_IPI_all (genapic->send_IPI_all)
44#else
45static inline void send_IPI_allbutself(int vector)
46{
47 /*
48 * if there are no other CPUs in the system then we get an APIC send
49 * error if we try to broadcast, thus avoid sending IPIs in this case.
50 */
51 if (!(num_online_cpus() > 1))
52 return;
53
54 __local_send_IPI_allbutself(vector);
55 return;
56}
57
58static inline void send_IPI_all(int vector)
59{
60 __local_send_IPI_all(vector);
61}
62#endif
63
64#endif /* _ASM_X86_MACH_DEFAULT_MACH_IPI_H */
diff --git a/arch/x86/include/asm/mach-default/mach_mpparse.h b/arch/x86/include/asm/mach-default/mach_mpparse.h
deleted file mode 100644
index c70a263d68cd..000000000000
--- a/arch/x86/include/asm/mach-default/mach_mpparse.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
2#define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
3
4static inline int
5mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
6{
7 return 0;
8}
9
10/* Hook from generic ACPI tables.c */
11static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
12{
13 return 0;
14}
15
16
17#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-default/mach_mpspec.h b/arch/x86/include/asm/mach-default/mach_mpspec.h
deleted file mode 100644
index e85ede686be8..000000000000
--- a/arch/x86/include/asm/mach-default/mach_mpspec.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
2#define _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6#if CONFIG_BASE_SMALL == 0
7#define MAX_MP_BUSSES 256
8#else
9#define MAX_MP_BUSSES 32
10#endif
11
12#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h
deleted file mode 100644
index 89897a6a65b9..000000000000
--- a/arch/x86/include/asm/mach-default/mach_wakecpu.h
+++ /dev/null
@@ -1,41 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
2#define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
3
4#define TRAMPOLINE_PHYS_LOW (0x467)
5#define TRAMPOLINE_PHYS_HIGH (0x469)
6
7static inline void wait_for_init_deassert(atomic_t *deassert)
8{
9 while (!atomic_read(deassert))
10 cpu_relax();
11 return;
12}
13
14/* Nothing to do for most platforms, since cleared by the INIT cycle */
15static inline void smp_callin_clear_local_apic(void)
16{
17}
18
19static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
20{
21}
22
23static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
24{
25}
26
27#ifdef CONFIG_SMP
28extern void __inquire_remote_apic(int apicid);
29#else /* CONFIG_SMP */
30static inline void __inquire_remote_apic(int apicid)
31{
32}
33#endif /* CONFIG_SMP */
34
35static inline void inquire_remote_apic(int apicid)
36{
37 if (apic_verbosity >= APIC_DEBUG)
38 __inquire_remote_apic(apicid);
39}
40
41#endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */
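
wait_for_init_deassert() above is a plain spin on an atomic flag, with cpu_relax() easing the busy-wait. A userspace analogue using C11 atomics and a second thread standing in for the CPU that releases the flag (names and timing are illustrative only; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int init_deasserted;

/* Same shape as the kernel helper: spin until the flag is set. */
static void wait_for_init_deassert(atomic_int *deassert)
{
	while (!atomic_load(deassert))
		;	/* cpu_relax() in the kernel version */
}

static void *booting_cpu(void *arg)
{
	(void)arg;
	wait_for_init_deassert(&init_deasserted);
	printf("secondary released, continuing bring-up\n");
	return NULL;
}

int main(void)
{
	pthread_t ap;

	pthread_create(&ap, NULL, booting_cpu, NULL);
	usleep(10000);				/* let the 'AP' start spinning */
	atomic_store(&init_deasserted, 1);	/* what the boot CPU does      */
	pthread_join(ap, NULL);
	return 0;
}
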
diff --git a/arch/x86/include/asm/mach-generic/gpio.h b/arch/x86/include/asm/mach-generic/gpio.h
deleted file mode 100644
index 995c45efdb33..000000000000
--- a/arch/x86/include/asm/mach-generic/gpio.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_GPIO_H
2#define _ASM_X86_MACH_GENERIC_GPIO_H
3
4int gpio_request(unsigned gpio, const char *label);
5void gpio_free(unsigned gpio);
6int gpio_direction_input(unsigned gpio);
7int gpio_direction_output(unsigned gpio, int value);
8int gpio_get_value(unsigned gpio);
9void gpio_set_value(unsigned gpio, int value);
10int gpio_to_irq(unsigned gpio);
11int irq_to_gpio(unsigned irq);
12
13#include <asm-generic/gpio.h> /* cansleep wrappers */
14
15#endif /* _ASM_X86_MACH_GENERIC_GPIO_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h
deleted file mode 100644
index 48553e958ad5..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_apic.h
+++ /dev/null
@@ -1,35 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_APIC_H
2#define _ASM_X86_MACH_GENERIC_MACH_APIC_H
3
4#include <asm/genapic.h>
5
6#define esr_disable (genapic->ESR_DISABLE)
7#define NO_BALANCE_IRQ (genapic->no_balance_irq)
8#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
9#define INT_DEST_MODE (genapic->int_dest_mode)
10#undef APIC_DEST_LOGICAL
11#define APIC_DEST_LOGICAL (genapic->apic_destination_logical)
12#define TARGET_CPUS (genapic->target_cpus())
13#define apic_id_registered (genapic->apic_id_registered)
14#define init_apic_ldr (genapic->init_apic_ldr)
15#define ioapic_phys_id_map (genapic->ioapic_phys_id_map)
16#define setup_apic_routing (genapic->setup_apic_routing)
17#define multi_timer_check (genapic->multi_timer_check)
18#define apicid_to_node (genapic->apicid_to_node)
19#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid)
20#define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
21#define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
22#define setup_portio_remap (genapic->setup_portio_remap)
23#define check_apicid_present (genapic->check_apicid_present)
24#define check_phys_apicid_present (genapic->check_phys_apicid_present)
25#define check_apicid_used (genapic->check_apicid_used)
26#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
27#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
28#define vector_allocation_domain (genapic->vector_allocation_domain)
29#define enable_apic_mode (genapic->enable_apic_mode)
30#define phys_pkg_id (genapic->phys_pkg_id)
31#define wakeup_secondary_cpu (genapic->wakeup_cpu)
32
33extern void generic_bigsmp_probe(void);
34
35#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apicdef.h b/arch/x86/include/asm/mach-generic/mach_apicdef.h
deleted file mode 100644
index 68041f3802f4..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_apicdef.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
2#define _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
3
4#ifndef APIC_DEFINITION
5#include <asm/genapic.h>
6
7#define GET_APIC_ID (genapic->get_apic_id)
8#define APIC_ID_MASK (genapic->apic_id_mask)
9#endif
10
11#endif /* _ASM_X86_MACH_GENERIC_MACH_APICDEF_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_ipi.h b/arch/x86/include/asm/mach-generic/mach_ipi.h
deleted file mode 100644
index ffd637e3c3d9..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_ipi.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_IPI_H
2#define _ASM_X86_MACH_GENERIC_MACH_IPI_H
3
4#include <asm/genapic.h>
5
6#define send_IPI_mask (genapic->send_IPI_mask)
7#define send_IPI_allbutself (genapic->send_IPI_allbutself)
8#define send_IPI_all (genapic->send_IPI_all)
9
10#endif /* _ASM_X86_MACH_GENERIC_MACH_IPI_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpparse.h b/arch/x86/include/asm/mach-generic/mach_mpparse.h
deleted file mode 100644
index 9444ab8dca94..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_mpparse.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
2#define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
3
4
5extern int mps_oem_check(struct mpc_table *, char *, char *);
6
7extern int acpi_madt_oem_check(char *, char *);
8
9#endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpspec.h b/arch/x86/include/asm/mach-generic/mach_mpspec.h
deleted file mode 100644
index 3bc407226578..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_mpspec.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
2#define _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6/* Summit or generic (i.e. installer) kernels need lots of bus entries. */
7/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
8#define MAX_MP_BUSSES 260
9
10extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
11
12#endif /* _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h
deleted file mode 100644
index 1ab16b168c8a..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_wakecpu.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
2#define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
3
4#define TRAMPOLINE_PHYS_LOW (genapic->trampoline_phys_low)
5#define TRAMPOLINE_PHYS_HIGH (genapic->trampoline_phys_high)
6#define wait_for_init_deassert (genapic->wait_for_init_deassert)
7#define smp_callin_clear_local_apic (genapic->smp_callin_clear_local_apic)
8#define store_NMI_vector (genapic->store_NMI_vector)
9#define restore_NMI_vector (genapic->restore_NMI_vector)
10#define inquire_remote_apic (genapic->inquire_remote_apic)
11
12#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-rdc321x/gpio.h b/arch/x86/include/asm/mach-rdc321x/gpio.h
deleted file mode 100644
index c210ab5788b0..000000000000
--- a/arch/x86/include/asm/mach-rdc321x/gpio.h
+++ /dev/null
@@ -1,60 +0,0 @@
1#ifndef _ASM_X86_MACH_RDC321X_GPIO_H
2#define _ASM_X86_MACH_RDC321X_GPIO_H
3
4#include <linux/kernel.h>
5
6extern int rdc_gpio_get_value(unsigned gpio);
7extern void rdc_gpio_set_value(unsigned gpio, int value);
8extern int rdc_gpio_direction_input(unsigned gpio);
9extern int rdc_gpio_direction_output(unsigned gpio, int value);
10extern int rdc_gpio_request(unsigned gpio, const char *label);
11extern void rdc_gpio_free(unsigned gpio);
12extern void __init rdc321x_gpio_setup(void);
13
14/* Wrappers for the arch-neutral GPIO API */
15
16static inline int gpio_request(unsigned gpio, const char *label)
17{
18 return rdc_gpio_request(gpio, label);
19}
20
21static inline void gpio_free(unsigned gpio)
22{
23 might_sleep();
24 rdc_gpio_free(gpio);
25}
26
27static inline int gpio_direction_input(unsigned gpio)
28{
29 return rdc_gpio_direction_input(gpio);
30}
31
32static inline int gpio_direction_output(unsigned gpio, int value)
33{
34 return rdc_gpio_direction_output(gpio, value);
35}
36
37static inline int gpio_get_value(unsigned gpio)
38{
39 return rdc_gpio_get_value(gpio);
40}
41
42static inline void gpio_set_value(unsigned gpio, int value)
43{
44 rdc_gpio_set_value(gpio, value);
45}
46
47static inline int gpio_to_irq(unsigned gpio)
48{
49 return gpio;
50}
51
52static inline int irq_to_gpio(unsigned irq)
53{
54 return irq;
55}
56
57/* For cansleep */
58#include <asm-generic/gpio.h>
59
60#endif /* _ASM_X86_MACH_RDC321X_GPIO_H */
diff --git a/arch/x86/include/asm/mach-default/mach_timer.h b/arch/x86/include/asm/mach_timer.h
index 853728519ae9..853728519ae9 100644
--- a/arch/x86/include/asm/mach-default/mach_timer.h
+++ b/arch/x86/include/asm/mach_timer.h
diff --git a/arch/x86/include/asm/mach-default/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index f7920601e472..f7920601e472 100644
--- a/arch/x86/include/asm/mach-default/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 8aeeb3fd73db..52948df9cd1d 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -21,11 +21,54 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
21int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 21int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
22void destroy_context(struct mm_struct *mm); 22void destroy_context(struct mm_struct *mm);
23 23
24#ifdef CONFIG_X86_32 24
25# include "mmu_context_32.h" 25static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
26#else 26{
27# include "mmu_context_64.h" 27#ifdef CONFIG_SMP
28 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
29 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
30#endif
31}
32
33static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
34 struct task_struct *tsk)
35{
36 unsigned cpu = smp_processor_id();
37
38 if (likely(prev != next)) {
39 /* stop flush ipis for the previous mm */
40 cpu_clear(cpu, prev->cpu_vm_mask);
41#ifdef CONFIG_SMP
42 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
43 percpu_write(cpu_tlbstate.active_mm, next);
28#endif 44#endif
45 cpu_set(cpu, next->cpu_vm_mask);
46
47 /* Re-load page tables */
48 load_cr3(next->pgd);
49
50 /*
51 * load the LDT, if the LDT is different:
52 */
53 if (unlikely(prev->context.ldt != next->context.ldt))
54 load_LDT_nolock(&next->context);
55 }
56#ifdef CONFIG_SMP
57 else {
58 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
59 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
60
61 if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
62 /* We were in lazy tlb mode and leave_mm disabled
63 * tlb flush IPI delivery. We must reload CR3
                                     64 * to make sure we don't use freed page tables.
65 */
66 load_cr3(next->pgd);
67 load_LDT_nolock(&next->context);
68 }
69 }
70#endif
71}
29 72
30#define activate_mm(prev, next) \ 73#define activate_mm(prev, next) \
31do { \ 74do { \
@@ -33,5 +76,17 @@ do { \
33 switch_mm((prev), (next), NULL); \ 76 switch_mm((prev), (next), NULL); \
34} while (0); 77} while (0);
35 78
79#ifdef CONFIG_X86_32
80#define deactivate_mm(tsk, mm) \
81do { \
82 loadsegment(gs, 0); \
83} while (0)
84#else
85#define deactivate_mm(tsk, mm) \
86do { \
87 load_gs_index(0); \
88 loadsegment(fs, 0); \
89} while (0)
90#endif
36 91
37#endif /* _ASM_X86_MMU_CONTEXT_H */ 92#endif /* _ASM_X86_MMU_CONTEXT_H */
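
The unified switch_mm() above encodes a small state machine: on a real mm switch the CPU clears its bit in the old mm's cpu_vm_mask, goes to TLBSTATE_OK and reloads CR3; when re-entering the mm it was lazily attached to, CR3 is reloaded only if leave_mm() already cleared the CPU's bit, meaning a flush IPI may have been skipped. The single-CPU toy model below walks through those transitions; all types and the load_cr3() printout are stand-ins, not kernel code:

#include <stdio.h>

enum { TLBSTATE_OK = 1, TLBSTATE_LAZY = 2 };

struct mm { const char *name; unsigned long cpu_vm_mask; };

static struct { int state; struct mm *active_mm; } tlbstate;
static const int cpu = 0;

static void load_cr3(struct mm *mm) { printf("load_cr3(%s)\n", mm->name); }

static void enter_lazy_tlb(void)
{
	if (tlbstate.state == TLBSTATE_OK)
		tlbstate.state = TLBSTATE_LAZY;
}

static void switch_mm(struct mm *prev, struct mm *next)
{
	if (prev != next) {
		prev->cpu_vm_mask &= ~(1UL << cpu);	/* stop flush IPIs */
		tlbstate.state = TLBSTATE_OK;
		tlbstate.active_mm = next;
		next->cpu_vm_mask |= 1UL << cpu;
		load_cr3(next);				/* new page tables */
	} else {
		unsigned long bit = 1UL << cpu;

		tlbstate.state = TLBSTATE_OK;
		/* the kernel BUG_ON()s if active_mm != next here */
		if (!(next->cpu_vm_mask & bit)) {
			/* We were lazy and leave_mm() cleared our bit, so a
			 * flush IPI may have been skipped: reload CR3. */
			next->cpu_vm_mask |= bit;
			load_cr3(next);
		}
	}
}

int main(void)
{
	struct mm a = { "mm_a", 1 }, b = { "mm_b", 0 };

	tlbstate.state = TLBSTATE_OK;
	tlbstate.active_mm = &a;

	switch_mm(&a, &b);	/* real switch: reloads CR3     */
	enter_lazy_tlb();	/* idle: go lazy                */
	b.cpu_vm_mask = 0;	/* pretend leave_mm() ran       */
	switch_mm(&b, &b);	/* back from lazy: CR3 reloaded */
	return 0;
}
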
diff --git a/arch/x86/include/asm/mmu_context_32.h b/arch/x86/include/asm/mmu_context_32.h
deleted file mode 100644
index 7e98ce1d2c0e..000000000000
--- a/arch/x86/include/asm/mmu_context_32.h
+++ /dev/null
@@ -1,55 +0,0 @@
1#ifndef _ASM_X86_MMU_CONTEXT_32_H
2#define _ASM_X86_MMU_CONTEXT_32_H
3
4static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
5{
6#ifdef CONFIG_SMP
7 if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK)
8 x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
9#endif
10}
11
12static inline void switch_mm(struct mm_struct *prev,
13 struct mm_struct *next,
14 struct task_struct *tsk)
15{
16 int cpu = smp_processor_id();
17
18 if (likely(prev != next)) {
19 /* stop flush ipis for the previous mm */
20 cpu_clear(cpu, prev->cpu_vm_mask);
21#ifdef CONFIG_SMP
22 x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
23 x86_write_percpu(cpu_tlbstate.active_mm, next);
24#endif
25 cpu_set(cpu, next->cpu_vm_mask);
26
27 /* Re-load page tables */
28 load_cr3(next->pgd);
29
30 /*
31 * load the LDT, if the LDT is different:
32 */
33 if (unlikely(prev->context.ldt != next->context.ldt))
34 load_LDT_nolock(&next->context);
35 }
36#ifdef CONFIG_SMP
37 else {
38 x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
39 BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);
40
41 if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
42 /* We were in lazy tlb mode and leave_mm disabled
43 * tlb flush IPI delivery. We must reload %cr3.
44 */
45 load_cr3(next->pgd);
46 load_LDT_nolock(&next->context);
47 }
48 }
49#endif
50}
51
52#define deactivate_mm(tsk, mm) \
53 asm("movl %0,%%gs": :"r" (0));
54
55#endif /* _ASM_X86_MMU_CONTEXT_32_H */
diff --git a/arch/x86/include/asm/mmu_context_64.h b/arch/x86/include/asm/mmu_context_64.h
deleted file mode 100644
index 677d36e9540a..000000000000
--- a/arch/x86/include/asm/mmu_context_64.h
+++ /dev/null
@@ -1,54 +0,0 @@
1#ifndef _ASM_X86_MMU_CONTEXT_64_H
2#define _ASM_X86_MMU_CONTEXT_64_H
3
4#include <asm/pda.h>
5
6static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7{
8#ifdef CONFIG_SMP
9 if (read_pda(mmu_state) == TLBSTATE_OK)
10 write_pda(mmu_state, TLBSTATE_LAZY);
11#endif
12}
13
14static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15 struct task_struct *tsk)
16{
17 unsigned cpu = smp_processor_id();
18 if (likely(prev != next)) {
19 /* stop flush ipis for the previous mm */
20 cpu_clear(cpu, prev->cpu_vm_mask);
21#ifdef CONFIG_SMP
22 write_pda(mmu_state, TLBSTATE_OK);
23 write_pda(active_mm, next);
24#endif
25 cpu_set(cpu, next->cpu_vm_mask);
26 load_cr3(next->pgd);
27
28 if (unlikely(next->context.ldt != prev->context.ldt))
29 load_LDT_nolock(&next->context);
30 }
31#ifdef CONFIG_SMP
32 else {
33 write_pda(mmu_state, TLBSTATE_OK);
34 if (read_pda(active_mm) != next)
35 BUG();
36 if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
37 /* We were in lazy tlb mode and leave_mm disabled
38 * tlb flush IPI delivery. We must reload CR3
39 * to make sure to use no freed page tables.
40 */
41 load_cr3(next->pgd);
42 load_LDT_nolock(&next->context);
43 }
44 }
45#endif
46}
47
48#define deactivate_mm(tsk, mm) \
49do { \
50 load_gs_index(0); \
51 asm volatile("movl %0,%%fs"::"r"(0)); \
52} while (0)
53
54#endif /* _ASM_X86_MMU_CONTEXT_64_H */
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index bd22f2a3713f..5916c8df09d9 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -9,7 +9,18 @@ extern int apic_version[MAX_APICS];
9extern int pic_mode; 9extern int pic_mode;
10 10
11#ifdef CONFIG_X86_32 11#ifdef CONFIG_X86_32
12#include <mach_mpspec.h> 12
13/*
14 * Summit or generic (i.e. installer) kernels need lots of bus entries.
15 * Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets.
16 */
17#if CONFIG_BASE_SMALL == 0
18# define MAX_MP_BUSSES 260
19#else
20# define MAX_MP_BUSSES 32
21#endif
22
23#define MAX_IRQ_SOURCES 256
13 24
14extern unsigned int def_to_bigsmp; 25extern unsigned int def_to_bigsmp;
15extern u8 apicid_2_node[]; 26extern u8 apicid_2_node[];
@@ -20,15 +31,15 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
20extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; 31extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
21#endif 32#endif
22 33
23#define MAX_APICID 256 34#define MAX_APICID 256
24 35
25#else 36#else /* CONFIG_X86_64: */
26 37
27#define MAX_MP_BUSSES 256 38#define MAX_MP_BUSSES 256
28/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ 39/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
29#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) 40#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
30 41
31#endif 42#endif /* CONFIG_X86_64 */
32 43
33extern void early_find_smp_config(void); 44extern void early_find_smp_config(void);
34extern void early_get_smp_config(void); 45extern void early_get_smp_config(void);
@@ -45,11 +56,13 @@ extern int smp_found_config;
45extern int mpc_default_type; 56extern int mpc_default_type;
46extern unsigned long mp_lapic_addr; 57extern unsigned long mp_lapic_addr;
47 58
48extern void find_smp_config(void);
49extern void get_smp_config(void); 59extern void get_smp_config(void);
60
50#ifdef CONFIG_X86_MPPARSE 61#ifdef CONFIG_X86_MPPARSE
62extern void find_smp_config(void);
51extern void early_reserve_e820_mpc_new(void); 63extern void early_reserve_e820_mpc_new(void);
52#else 64#else
65static inline void find_smp_config(void) { }
53static inline void early_reserve_e820_mpc_new(void) { } 66static inline void early_reserve_e820_mpc_new(void) { }
54#endif 67#endif
55 68
@@ -64,6 +77,8 @@ extern int acpi_probe_gsi(void);
64#ifdef CONFIG_X86_IO_APIC 77#ifdef CONFIG_X86_IO_APIC
65extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, 78extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
66 u32 gsi, int triggering, int polarity); 79 u32 gsi, int triggering, int polarity);
80extern int mp_find_ioapic(int gsi);
81extern int mp_find_ioapic_pin(int ioapic, int gsi);
67#else 82#else
68static inline int 83static inline int
69mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, 84mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
@@ -148,4 +163,10 @@ static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
148 163
149extern physid_mask_t phys_cpu_present_map; 164extern physid_mask_t phys_cpu_present_map;
150 165
166extern int generic_mps_oem_check(struct mpc_table *, char *, char *);
167
168extern int default_acpi_madt_oem_check(char *, char *);
169
170extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
171
151#endif /* _ASM_X86_MPSPEC_H */ 172#endif /* _ASM_X86_MPSPEC_H */
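With find_smp_config() declared only under CONFIG_X86_MPPARSE and stubbed out as an empty inline otherwise, callers can drop their own #ifdef guards. A hypothetical caller, for illustration:

	/* Hypothetical caller: builds the same way with or without MPPARSE. */
	void __init example_boot_setup(void)
	{
		/* Empty inline stub when !CONFIG_X86_MPPARSE, real parser otherwise. */
		find_smp_config();
	}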
diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h
index 59568bc4767f..4a7f96d7c188 100644
--- a/arch/x86/include/asm/mpspec_def.h
+++ b/arch/x86/include/asm/mpspec_def.h
@@ -24,17 +24,18 @@
24# endif 24# endif
25#endif 25#endif
26 26
27struct intel_mp_floating { 27/* Intel MP Floating Pointer Structure */
28 char mpf_signature[4]; /* "_MP_" */ 28struct mpf_intel {
29 unsigned int mpf_physptr; /* Configuration table address */ 29 char signature[4]; /* "_MP_" */
30 unsigned char mpf_length; /* Our length (paragraphs) */ 30 unsigned int physptr; /* Configuration table address */
31 unsigned char mpf_specification;/* Specification version */ 31 unsigned char length; /* Our length (paragraphs) */
32 unsigned char mpf_checksum; /* Checksum (makes sum 0) */ 32 unsigned char specification; /* Specification version */
33 unsigned char mpf_feature1; /* Standard or configuration ? */ 33 unsigned char checksum; /* Checksum (makes sum 0) */
34 unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ 34 unsigned char feature1; /* Standard or configuration ? */
35 unsigned char mpf_feature3; /* Unused (0) */ 35 unsigned char feature2; /* Bit7 set for IMCR|PIC */
36 unsigned char mpf_feature4; /* Unused (0) */ 36 unsigned char feature3; /* Unused (0) */
37 unsigned char mpf_feature5; /* Unused (0) */ 37 unsigned char feature4; /* Unused (0) */
38 unsigned char feature5; /* Unused (0) */
38}; 39};
39 40
40#define MPC_SIGNATURE "PCMP" 41#define MPC_SIGNATURE "PCMP"
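Renaming struct intel_mp_floating to struct mpf_intel and dropping the redundant mpf_ prefix means every user of the floating pointer structure changes mechanically. A schematic of the kind of update mpparse.c needs (the validity check itself is simplified):

	/* Schematic only: old field names shown in the trailing comments. */
	static int __init mpf_looks_valid(struct mpf_intel *mpf)
	{
		if (memcmp(mpf->signature, "_MP_", 4) != 0)	/* was mpf_signature */
			return 0;
		if (mpf->length != 1)				/* was mpf_length   */
			return 0;
		return mpf->physptr != 0;			/* was mpf_physptr  */
	}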
diff --git a/arch/x86/include/asm/numaq.h b/arch/x86/include/asm/numaq.h
index 1e8bd30b4c16..9f0a5f5d29ec 100644
--- a/arch/x86/include/asm/numaq.h
+++ b/arch/x86/include/asm/numaq.h
@@ -31,6 +31,8 @@
31extern int found_numaq; 31extern int found_numaq;
32extern int get_memcfg_numaq(void); 32extern int get_memcfg_numaq(void);
33 33
34extern void *xquad_portio;
35
34/* 36/*
35 * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the 37 * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the
36 */ 38 */
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
deleted file mode 100644
index bf37bc49bd8e..000000000000
--- a/arch/x86/include/asm/numaq/apic.h
+++ /dev/null
@@ -1,142 +0,0 @@
1#ifndef __ASM_NUMAQ_APIC_H
2#define __ASM_NUMAQ_APIC_H
3
4#include <asm/io.h>
5#include <linux/mmzone.h>
6#include <linux/nodemask.h>
7
8#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
9
10static inline const cpumask_t *target_cpus(void)
11{
12 return &CPU_MASK_ALL;
13}
14
15#define NO_BALANCE_IRQ (1)
16#define esr_disable (1)
17
18#define INT_DELIVERY_MODE dest_LowestPrio
19#define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */
20
21static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
22{
23 return physid_isset(apicid, bitmap);
24}
25static inline unsigned long check_apicid_present(int bit)
26{
27 return physid_isset(bit, phys_cpu_present_map);
28}
29#define apicid_cluster(apicid) (apicid & 0xF0)
30
31static inline int apic_id_registered(void)
32{
33 return 1;
34}
35
36static inline void init_apic_ldr(void)
37{
38 /* Already done in NUMA-Q firmware */
39}
40
41static inline void setup_apic_routing(void)
42{
43 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
44 "NUMA-Q", nr_ioapics);
45}
46
47/*
48 * Skip adding the timer int on secondary nodes, which causes
49 * a small but painful rift in the time-space continuum.
50 */
51static inline int multi_timer_check(int apic, int irq)
52{
53 return apic != 0 && irq == 0;
54}
55
56static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
57{
58 /* We don't have a good way to do this yet - hack */
59 return physids_promote(0xFUL);
60}
61
62/* Mapping from cpu number to logical apicid */
63extern u8 cpu_2_logical_apicid[];
64static inline int cpu_to_logical_apicid(int cpu)
65{
66 if (cpu >= nr_cpu_ids)
67 return BAD_APICID;
68 return (int)cpu_2_logical_apicid[cpu];
69}
70
71/*
72 * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
73 * cpu to APIC ID relation to properly interact with the intelligent
74 * mode of the cluster controller.
75 */
76static inline int cpu_present_to_apicid(int mps_cpu)
77{
78 if (mps_cpu < 60)
79 return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
80 else
81 return BAD_APICID;
82}
83
84static inline int apicid_to_node(int logical_apicid)
85{
86 return logical_apicid >> 4;
87}
88
89static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
90{
91 int node = apicid_to_node(logical_apicid);
92 int cpu = __ffs(logical_apicid & 0xf);
93
94 return physid_mask_of_physid(cpu + 4*node);
95}
96
97extern void *xquad_portio;
98
99static inline void setup_portio_remap(void)
100{
101 int num_quads = num_online_nodes();
102
103 if (num_quads <= 1)
104 return;
105
106 printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
107 xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
108 printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
109 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
110}
111
112static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
113{
114 return (1);
115}
116
117static inline void enable_apic_mode(void)
118{
119}
120
121/*
122 * We use physical apicids here, not logical, so just return the default
123 * physical broadcast to stop people from breaking us
124 */
125static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
126{
127 return (int) 0xF;
128}
129
130static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
131 const struct cpumask *andmask)
132{
133 return (int) 0xF;
134}
135
136/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
137static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
138{
139 return cpuid_apic >> index_msb;
140}
141
142#endif /* __ASM_NUMAQ_APIC_H */
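The quad-local encoding in cpu_present_to_apicid() above is easiest to see with a worked value (my arithmetic, for illustration):

	/*
	 * Worked example of the NUMA-Q mapping above:
	 *
	 *   cpu_present_to_apicid(5)    = ((5 >> 2) << 4) | (1 << (5 & 0x3))
	 *                               = 0x10 | 0x02 = 0x12   (quad 1, local CPU 1)
	 *   apicid_to_node(0x12)        = 0x12 >> 4               = 1
	 *   apicid_to_cpu_present(0x12) = __ffs(0x12 & 0xf) + 4*1 = 1 + 4 = 5
	 *
	 * so the logical apicid round-trips back to MPS CPU 5.
	 */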
diff --git a/arch/x86/include/asm/numaq/apicdef.h b/arch/x86/include/asm/numaq/apicdef.h
deleted file mode 100644
index e012a46cc22a..000000000000
--- a/arch/x86/include/asm/numaq/apicdef.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __ASM_NUMAQ_APICDEF_H
2#define __ASM_NUMAQ_APICDEF_H
3
4
5#define APIC_ID_MASK (0xF<<24)
6
7static inline unsigned get_apic_id(unsigned long x)
8{
9 return (((x)>>24)&0x0F);
10}
11
12#define GET_APIC_ID(x) get_apic_id(x)
13
14#endif
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h
deleted file mode 100644
index a8374c652778..000000000000
--- a/arch/x86/include/asm/numaq/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef __ASM_NUMAQ_IPI_H
2#define __ASM_NUMAQ_IPI_H
3
4void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
5void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
6
7static inline void send_IPI_mask(const struct cpumask *mask, int vector)
8{
9 send_IPI_mask_sequence(mask, vector);
10}
11
12static inline void send_IPI_allbutself(int vector)
13{
14 send_IPI_mask_allbutself(cpu_online_mask, vector);
15}
16
17static inline void send_IPI_all(int vector)
18{
19 send_IPI_mask(cpu_online_mask, vector);
20}
21
22#endif /* __ASM_NUMAQ_IPI_H */
diff --git a/arch/x86/include/asm/numaq/mpparse.h b/arch/x86/include/asm/numaq/mpparse.h
deleted file mode 100644
index a2eeefcd1cc7..000000000000
--- a/arch/x86/include/asm/numaq/mpparse.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_NUMAQ_MPPARSE_H
2#define __ASM_NUMAQ_MPPARSE_H
3
4extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
5
6#endif /* __ASM_NUMAQ_MPPARSE_H */
diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h
deleted file mode 100644
index 6f499df8eddb..000000000000
--- a/arch/x86/include/asm/numaq/wakecpu.h
+++ /dev/null
@@ -1,45 +0,0 @@
1#ifndef __ASM_NUMAQ_WAKECPU_H
2#define __ASM_NUMAQ_WAKECPU_H
3
4/* This file copes with machines that wakeup secondary CPUs by NMIs */
5
6#define TRAMPOLINE_PHYS_LOW (0x8)
7#define TRAMPOLINE_PHYS_HIGH (0xa)
8
9/* We don't do anything here because we use NMI's to boot instead */
10static inline void wait_for_init_deassert(atomic_t *deassert)
11{
12}
13
14/*
15 * Because we use NMIs rather than the INIT-STARTUP sequence to
16 * bootstrap the CPUs, the APIC may be in a weird state. Kick it.
17 */
18static inline void smp_callin_clear_local_apic(void)
19{
20 clear_local_APIC();
21}
22
23static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
24{
25 printk("Storing NMI vector\n");
26 *high =
27 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH));
28 *low =
29 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW));
30}
31
32static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
33{
34 printk("Restoring NMI vector\n");
35 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
36 *high;
37 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
38 *low;
39}
40
41static inline void inquire_remote_apic(int apicid)
42{
43}
44
45#endif /* __ASM_NUMAQ_WAKECPU_H */
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index e9873a2e8695..6b9810859daf 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -147,7 +147,7 @@ static inline pteval_t native_pte_val(pte_t pte)
147 return pte.pte; 147 return pte.pte;
148} 148}
149 149
150static inline pteval_t native_pte_flags(pte_t pte) 150static inline pteval_t pte_flags(pte_t pte)
151{ 151{
152 return native_pte_val(pte) & PTE_FLAGS_MASK; 152 return native_pte_val(pte) & PTE_FLAGS_MASK;
153} 153}
@@ -173,7 +173,6 @@ static inline pteval_t native_pte_flags(pte_t pte)
173#endif 173#endif
174 174
175#define pte_val(x) native_pte_val(x) 175#define pte_val(x) native_pte_val(x)
176#define pte_flags(x) native_pte_flags(x)
177#define __pte(x) native_make_pte(x) 176#define __pte(x) native_make_pte(x)
178 177
179#endif /* CONFIG_PARAVIRT */ 178#endif /* CONFIG_PARAVIRT */
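pte_flags() stops being a paravirt operation here: the native masking is the only implementation, and the corresponding pv_mmu_ops hook is dropped in the paravirt.h hunks below. Callers are unchanged; for illustration, the usual flag test still reads:

	/* Illustration only: mirrors the existing pte_dirty() style helpers. */
	static inline int example_pte_is_dirty(pte_t pte)
	{
		return pte_flags(pte) & _PAGE_DIRTY;
	}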
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 5ebca29f44f0..e27fdbe5f9e4 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -13,8 +13,8 @@
13#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1) 13#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
14#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) 14#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
15 15
16#define IRQSTACK_ORDER 2 16#define IRQ_STACK_ORDER 2
17#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER) 17#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
18 18
19#define STACKFAULT_STACK 1 19#define STACKFAULT_STACK 1
20#define DOUBLEFAULT_STACK 2 20#define DOUBLEFAULT_STACK 2
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index c09a14127584..1c244b64573f 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -12,21 +12,38 @@
12#define CLBR_EAX (1 << 0) 12#define CLBR_EAX (1 << 0)
13#define CLBR_ECX (1 << 1) 13#define CLBR_ECX (1 << 1)
14#define CLBR_EDX (1 << 2) 14#define CLBR_EDX (1 << 2)
15#define CLBR_EDI (1 << 3)
15 16
16#ifdef CONFIG_X86_64 17#ifdef CONFIG_X86_32
17#define CLBR_RSI (1 << 3) 18/* CLBR_ANY should match all regs platform has. For i386, that's just it */
18#define CLBR_RDI (1 << 4) 19#define CLBR_ANY ((1 << 4) - 1)
20
21#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
22#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
23#define CLBR_SCRATCH (0)
24#else
25#define CLBR_RAX CLBR_EAX
26#define CLBR_RCX CLBR_ECX
27#define CLBR_RDX CLBR_EDX
28#define CLBR_RDI CLBR_EDI
29#define CLBR_RSI (1 << 4)
19#define CLBR_R8 (1 << 5) 30#define CLBR_R8 (1 << 5)
20#define CLBR_R9 (1 << 6) 31#define CLBR_R9 (1 << 6)
21#define CLBR_R10 (1 << 7) 32#define CLBR_R10 (1 << 7)
22#define CLBR_R11 (1 << 8) 33#define CLBR_R11 (1 << 8)
34
23#define CLBR_ANY ((1 << 9) - 1) 35#define CLBR_ANY ((1 << 9) - 1)
36
37#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
38 CLBR_RCX | CLBR_R8 | CLBR_R9)
39#define CLBR_RET_REG (CLBR_RAX)
40#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
41
24#include <asm/desc_defs.h> 42#include <asm/desc_defs.h>
25#else
26/* CLBR_ANY should match all regs platform has. For i386, that's just it */
27#define CLBR_ANY ((1 << 3) - 1)
28#endif /* X86_64 */ 43#endif /* X86_64 */
29 44
45#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
46
30#ifndef __ASSEMBLY__ 47#ifndef __ASSEMBLY__
31#include <linux/types.h> 48#include <linux/types.h>
32#include <linux/cpumask.h> 49#include <linux/cpumask.h>
@@ -40,6 +57,14 @@ struct tss_struct;
40struct mm_struct; 57struct mm_struct;
41struct desc_struct; 58struct desc_struct;
42 59
60/*
61 * Wrapper type for pointers to code which uses the non-standard
62 * calling convention. See PV_CALLEE_SAVE_REGS_THUNK below.
63 */
64struct paravirt_callee_save {
65 void *func;
66};
67
43/* general info */ 68/* general info */
44struct pv_info { 69struct pv_info {
45 unsigned int kernel_rpl; 70 unsigned int kernel_rpl;
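A struct paravirt_callee_save slot is filled either with a thunk wrapped around an ordinary C function or with a function that already obeys the register-preserving convention; the PV_CALLEE_SAVE() and __PV_IS_CALLEE_SAVE() helpers appear further down in this patch. A sketch of how a backend might wire up the irq ops; the concrete assignments here are assumptions, not taken from this patch:

	/* Hypothetical backend functions written in ordinary C. */
	unsigned long example_save_fl(void);
	void example_restore_fl(unsigned long flags);

	/* Generate register-preserving entry points around them ... */
	PV_CALLEE_SAVE_REGS_THUNK(example_save_fl);
	PV_CALLEE_SAVE_REGS_THUNK(example_restore_fl);

	/* ... and hand those to the ops table. */
	static const struct pv_irq_ops example_irq_ops = {
		.save_fl    = PV_CALLEE_SAVE(example_save_fl),
		.restore_fl = PV_CALLEE_SAVE(example_restore_fl),
		/* A function that already preserves everything can skip the thunk:
		 *   .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl)            */
	};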
@@ -189,11 +214,15 @@ struct pv_irq_ops {
189 * expected to use X86_EFLAGS_IF; all other bits 214 * expected to use X86_EFLAGS_IF; all other bits
190 * returned from save_fl are undefined, and may be ignored by 215 * returned from save_fl are undefined, and may be ignored by
191 * restore_fl. 216 * restore_fl.
217 *
218 * NOTE: Callers of these functions expect the callee to preserve
219 * more registers than the standard C calling convention requires.
192 */ 220 */
193 unsigned long (*save_fl)(void); 221 struct paravirt_callee_save save_fl;
194 void (*restore_fl)(unsigned long); 222 struct paravirt_callee_save restore_fl;
195 void (*irq_disable)(void); 223 struct paravirt_callee_save irq_disable;
196 void (*irq_enable)(void); 224 struct paravirt_callee_save irq_enable;
225
197 void (*safe_halt)(void); 226 void (*safe_halt)(void);
198 void (*halt)(void); 227 void (*halt)(void);
199 228
@@ -244,7 +273,8 @@ struct pv_mmu_ops {
244 void (*flush_tlb_user)(void); 273 void (*flush_tlb_user)(void);
245 void (*flush_tlb_kernel)(void); 274 void (*flush_tlb_kernel)(void);
246 void (*flush_tlb_single)(unsigned long addr); 275 void (*flush_tlb_single)(unsigned long addr);
247 void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm, 276 void (*flush_tlb_others)(const struct cpumask *cpus,
277 struct mm_struct *mm,
248 unsigned long va); 278 unsigned long va);
249 279
250 /* Hooks for allocating and freeing a pagetable top-level */ 280 /* Hooks for allocating and freeing a pagetable top-level */
@@ -278,12 +308,11 @@ struct pv_mmu_ops {
278 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr, 308 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
279 pte_t *ptep, pte_t pte); 309 pte_t *ptep, pte_t pte);
280 310
281 pteval_t (*pte_val)(pte_t); 311 struct paravirt_callee_save pte_val;
282 pteval_t (*pte_flags)(pte_t); 312 struct paravirt_callee_save make_pte;
283 pte_t (*make_pte)(pteval_t pte);
284 313
285 pgdval_t (*pgd_val)(pgd_t); 314 struct paravirt_callee_save pgd_val;
286 pgd_t (*make_pgd)(pgdval_t pgd); 315 struct paravirt_callee_save make_pgd;
287 316
288#if PAGETABLE_LEVELS >= 3 317#if PAGETABLE_LEVELS >= 3
289#ifdef CONFIG_X86_PAE 318#ifdef CONFIG_X86_PAE
@@ -298,12 +327,12 @@ struct pv_mmu_ops {
298 327
299 void (*set_pud)(pud_t *pudp, pud_t pudval); 328 void (*set_pud)(pud_t *pudp, pud_t pudval);
300 329
301 pmdval_t (*pmd_val)(pmd_t); 330 struct paravirt_callee_save pmd_val;
302 pmd_t (*make_pmd)(pmdval_t pmd); 331 struct paravirt_callee_save make_pmd;
303 332
304#if PAGETABLE_LEVELS == 4 333#if PAGETABLE_LEVELS == 4
305 pudval_t (*pud_val)(pud_t); 334 struct paravirt_callee_save pud_val;
306 pud_t (*make_pud)(pudval_t pud); 335 struct paravirt_callee_save make_pud;
307 336
308 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); 337 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
309#endif /* PAGETABLE_LEVELS == 4 */ 338#endif /* PAGETABLE_LEVELS == 4 */
@@ -388,6 +417,8 @@ extern struct pv_lock_ops pv_lock_ops;
388 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":") 417 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
389 418
390unsigned paravirt_patch_nop(void); 419unsigned paravirt_patch_nop(void);
420unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
421unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
391unsigned paravirt_patch_ignore(unsigned len); 422unsigned paravirt_patch_ignore(unsigned len);
392unsigned paravirt_patch_call(void *insnbuf, 423unsigned paravirt_patch_call(void *insnbuf,
393 const void *target, u16 tgt_clobbers, 424 const void *target, u16 tgt_clobbers,
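paravirt_patch_ident_32/64 go with the new _paravirt_ident_32/_paravirt_ident_64 helpers declared near the end of this file: an op that is a pure identity function on native hardware (pte_val, make_pte, pgd_val and friends) can be patched down to a register move or to nothing at all. A sketch of how the native template could use them; this is an assumption about paravirt.c, which is not shown in this hunk:

	/* Sketch (assumed): native mmu ops become identity callee-save functions,
	 * which the patcher can then replace via paravirt_patch_ident_64(),
	 * i.e. with no code at all on 64-bit. */
	static const struct pv_mmu_ops example_native_mmu_ops = {
		.pte_val  = __PV_IS_CALLEE_SAVE(_paravirt_ident_64),
		.make_pte = __PV_IS_CALLEE_SAVE(_paravirt_ident_64),
		.pgd_val  = __PV_IS_CALLEE_SAVE(_paravirt_ident_64),
		.make_pgd = __PV_IS_CALLEE_SAVE(_paravirt_ident_64),
	};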
@@ -479,25 +510,45 @@ int paravirt_disable_iospace(void);
479 * makes sure the incoming and outgoing types are always correct. 510 * makes sure the incoming and outgoing types are always correct.
480 */ 511 */
481#ifdef CONFIG_X86_32 512#ifdef CONFIG_X86_32
482#define PVOP_VCALL_ARGS unsigned long __eax, __edx, __ecx 513#define PVOP_VCALL_ARGS \
514 unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
483#define PVOP_CALL_ARGS PVOP_VCALL_ARGS 515#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
516
517#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
518#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
519#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
520
484#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \ 521#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
485 "=c" (__ecx) 522 "=c" (__ecx)
486#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS 523#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
524
525#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
526#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
527
487#define EXTRA_CLOBBERS 528#define EXTRA_CLOBBERS
488#define VEXTRA_CLOBBERS 529#define VEXTRA_CLOBBERS
489#else 530#else /* CONFIG_X86_64 */
490#define PVOP_VCALL_ARGS unsigned long __edi, __esi, __edx, __ecx 531#define PVOP_VCALL_ARGS \
532 unsigned long __edi = __edi, __esi = __esi, \
533 __edx = __edx, __ecx = __ecx
491#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax 534#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
535
536#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
537#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
538#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
539#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
540
492#define PVOP_VCALL_CLOBBERS "=D" (__edi), \ 541#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
493 "=S" (__esi), "=d" (__edx), \ 542 "=S" (__esi), "=d" (__edx), \
494 "=c" (__ecx) 543 "=c" (__ecx)
495
496#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) 544#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
497 545
546#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
547#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
548
498#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11" 549#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
499#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11" 550#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
500#endif 551#endif /* CONFIG_X86_32 */
501 552
502#ifdef CONFIG_PARAVIRT_DEBUG 553#ifdef CONFIG_PARAVIRT_DEBUG
503#define PVOP_TEST_NULL(op) BUG_ON(op == NULL) 554#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
@@ -505,10 +556,11 @@ int paravirt_disable_iospace(void);
505#define PVOP_TEST_NULL(op) ((void)op) 556#define PVOP_TEST_NULL(op) ((void)op)
506#endif 557#endif
507 558
508#define __PVOP_CALL(rettype, op, pre, post, ...) \ 559#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
560 pre, post, ...) \
509 ({ \ 561 ({ \
510 rettype __ret; \ 562 rettype __ret; \
511 PVOP_CALL_ARGS; \ 563 PVOP_CALL_ARGS; \
512 PVOP_TEST_NULL(op); \ 564 PVOP_TEST_NULL(op); \
513 /* This is 32-bit specific, but is okay in 64-bit */ \ 565 /* This is 32-bit specific, but is okay in 64-bit */ \
514 /* since this condition will never hold */ \ 566 /* since this condition will never hold */ \
@@ -516,70 +568,113 @@ int paravirt_disable_iospace(void);
516 asm volatile(pre \ 568 asm volatile(pre \
517 paravirt_alt(PARAVIRT_CALL) \ 569 paravirt_alt(PARAVIRT_CALL) \
518 post \ 570 post \
519 : PVOP_CALL_CLOBBERS \ 571 : call_clbr \
520 : paravirt_type(op), \ 572 : paravirt_type(op), \
521 paravirt_clobber(CLBR_ANY), \ 573 paravirt_clobber(clbr), \
522 ##__VA_ARGS__ \ 574 ##__VA_ARGS__ \
523 : "memory", "cc" EXTRA_CLOBBERS); \ 575 : "memory", "cc" extra_clbr); \
524 __ret = (rettype)((((u64)__edx) << 32) | __eax); \ 576 __ret = (rettype)((((u64)__edx) << 32) | __eax); \
525 } else { \ 577 } else { \
526 asm volatile(pre \ 578 asm volatile(pre \
527 paravirt_alt(PARAVIRT_CALL) \ 579 paravirt_alt(PARAVIRT_CALL) \
528 post \ 580 post \
529 : PVOP_CALL_CLOBBERS \ 581 : call_clbr \
530 : paravirt_type(op), \ 582 : paravirt_type(op), \
531 paravirt_clobber(CLBR_ANY), \ 583 paravirt_clobber(clbr), \
532 ##__VA_ARGS__ \ 584 ##__VA_ARGS__ \
533 : "memory", "cc" EXTRA_CLOBBERS); \ 585 : "memory", "cc" extra_clbr); \
534 __ret = (rettype)__eax; \ 586 __ret = (rettype)__eax; \
535 } \ 587 } \
536 __ret; \ 588 __ret; \
537 }) 589 })
538#define __PVOP_VCALL(op, pre, post, ...) \ 590
591#define __PVOP_CALL(rettype, op, pre, post, ...) \
592 ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
593 EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
594
595#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
596 ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
597 PVOP_CALLEE_CLOBBERS, , \
598 pre, post, ##__VA_ARGS__)
599
600
601#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
539 ({ \ 602 ({ \
540 PVOP_VCALL_ARGS; \ 603 PVOP_VCALL_ARGS; \
541 PVOP_TEST_NULL(op); \ 604 PVOP_TEST_NULL(op); \
542 asm volatile(pre \ 605 asm volatile(pre \
543 paravirt_alt(PARAVIRT_CALL) \ 606 paravirt_alt(PARAVIRT_CALL) \
544 post \ 607 post \
545 : PVOP_VCALL_CLOBBERS \ 608 : call_clbr \
546 : paravirt_type(op), \ 609 : paravirt_type(op), \
547 paravirt_clobber(CLBR_ANY), \ 610 paravirt_clobber(clbr), \
548 ##__VA_ARGS__ \ 611 ##__VA_ARGS__ \
549 : "memory", "cc" VEXTRA_CLOBBERS); \ 612 : "memory", "cc" extra_clbr); \
550 }) 613 })
551 614
615#define __PVOP_VCALL(op, pre, post, ...) \
616 ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
617 VEXTRA_CLOBBERS, \
618 pre, post, ##__VA_ARGS__)
619
620#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \
621 ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
622 PVOP_VCALLEE_CLOBBERS, , \
623 pre, post, ##__VA_ARGS__)
624
625
626
552#define PVOP_CALL0(rettype, op) \ 627#define PVOP_CALL0(rettype, op) \
553 __PVOP_CALL(rettype, op, "", "") 628 __PVOP_CALL(rettype, op, "", "")
554#define PVOP_VCALL0(op) \ 629#define PVOP_VCALL0(op) \
555 __PVOP_VCALL(op, "", "") 630 __PVOP_VCALL(op, "", "")
556 631
632#define PVOP_CALLEE0(rettype, op) \
633 __PVOP_CALLEESAVE(rettype, op, "", "")
634#define PVOP_VCALLEE0(op) \
635 __PVOP_VCALLEESAVE(op, "", "")
636
637
557#define PVOP_CALL1(rettype, op, arg1) \ 638#define PVOP_CALL1(rettype, op, arg1) \
558 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1))) 639 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
559#define PVOP_VCALL1(op, arg1) \ 640#define PVOP_VCALL1(op, arg1) \
560 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1))) 641 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
642
643#define PVOP_CALLEE1(rettype, op, arg1) \
644 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
645#define PVOP_VCALLEE1(op, arg1) \
646 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
647
561 648
562#define PVOP_CALL2(rettype, op, arg1, arg2) \ 649#define PVOP_CALL2(rettype, op, arg1, arg2) \
563 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ 650 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
564 "1" ((unsigned long)(arg2))) 651 PVOP_CALL_ARG2(arg2))
565#define PVOP_VCALL2(op, arg1, arg2) \ 652#define PVOP_VCALL2(op, arg1, arg2) \
566 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ 653 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
567 "1" ((unsigned long)(arg2))) 654 PVOP_CALL_ARG2(arg2))
655
656#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
657 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
658 PVOP_CALL_ARG2(arg2))
659#define PVOP_VCALLEE2(op, arg1, arg2) \
660 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
661 PVOP_CALL_ARG2(arg2))
662
568 663
569#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ 664#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
570 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ 665 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
571 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) 666 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
572#define PVOP_VCALL3(op, arg1, arg2, arg3) \ 667#define PVOP_VCALL3(op, arg1, arg2, arg3) \
573 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ 668 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
574 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) 669 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
575 670
576/* This is the only difference in x86_64. We can make it much simpler */ 671/* This is the only difference in x86_64. We can make it much simpler */
577#ifdef CONFIG_X86_32 672#ifdef CONFIG_X86_32
578#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ 673#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
579 __PVOP_CALL(rettype, op, \ 674 __PVOP_CALL(rettype, op, \
580 "push %[_arg4];", "lea 4(%%esp),%%esp;", \ 675 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
581 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ 676 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
582 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) 677 PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
583#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ 678#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
584 __PVOP_VCALL(op, \ 679 __PVOP_VCALL(op, \
585 "push %[_arg4];", "lea 4(%%esp),%%esp;", \ 680 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
@@ -587,13 +682,13 @@ int paravirt_disable_iospace(void);
587 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) 682 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
588#else 683#else
589#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ 684#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
590 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ 685 __PVOP_CALL(rettype, op, "", "", \
591 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ 686 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
592 "3"((unsigned long)(arg4))) 687 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
593#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ 688#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
594 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ 689 __PVOP_VCALL(op, "", "", \
595 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ 690 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
596 "3"((unsigned long)(arg4))) 691 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
597#endif 692#endif
598 693
599static inline int paravirt_enabled(void) 694static inline int paravirt_enabled(void)
@@ -984,10 +1079,11 @@ static inline void __flush_tlb_single(unsigned long addr)
984 PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr); 1079 PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
985} 1080}
986 1081
987static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, 1082static inline void flush_tlb_others(const struct cpumask *cpumask,
1083 struct mm_struct *mm,
988 unsigned long va) 1084 unsigned long va)
989{ 1085{
990 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va); 1086 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
991} 1087}
992 1088
993static inline int paravirt_pgd_alloc(struct mm_struct *mm) 1089static inline int paravirt_pgd_alloc(struct mm_struct *mm)
@@ -1059,13 +1155,13 @@ static inline pte_t __pte(pteval_t val)
1059 pteval_t ret; 1155 pteval_t ret;
1060 1156
1061 if (sizeof(pteval_t) > sizeof(long)) 1157 if (sizeof(pteval_t) > sizeof(long))
1062 ret = PVOP_CALL2(pteval_t, 1158 ret = PVOP_CALLEE2(pteval_t,
1063 pv_mmu_ops.make_pte, 1159 pv_mmu_ops.make_pte,
1064 val, (u64)val >> 32); 1160 val, (u64)val >> 32);
1065 else 1161 else
1066 ret = PVOP_CALL1(pteval_t, 1162 ret = PVOP_CALLEE1(pteval_t,
1067 pv_mmu_ops.make_pte, 1163 pv_mmu_ops.make_pte,
1068 val); 1164 val);
1069 1165
1070 return (pte_t) { .pte = ret }; 1166 return (pte_t) { .pte = ret };
1071} 1167}
@@ -1075,29 +1171,12 @@ static inline pteval_t pte_val(pte_t pte)
1075 pteval_t ret; 1171 pteval_t ret;
1076 1172
1077 if (sizeof(pteval_t) > sizeof(long)) 1173 if (sizeof(pteval_t) > sizeof(long))
1078 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val, 1174 ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
1079 pte.pte, (u64)pte.pte >> 32); 1175 pte.pte, (u64)pte.pte >> 32);
1080 else
1081 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
1082 pte.pte);
1083
1084 return ret;
1085}
1086
1087static inline pteval_t pte_flags(pte_t pte)
1088{
1089 pteval_t ret;
1090
1091 if (sizeof(pteval_t) > sizeof(long))
1092 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
1093 pte.pte, (u64)pte.pte >> 32);
1094 else 1176 else
1095 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags, 1177 ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
1096 pte.pte); 1178 pte.pte);
1097 1179
1098#ifdef CONFIG_PARAVIRT_DEBUG
1099 BUG_ON(ret & PTE_PFN_MASK);
1100#endif
1101 return ret; 1180 return ret;
1102} 1181}
1103 1182
@@ -1106,11 +1185,11 @@ static inline pgd_t __pgd(pgdval_t val)
1106 pgdval_t ret; 1185 pgdval_t ret;
1107 1186
1108 if (sizeof(pgdval_t) > sizeof(long)) 1187 if (sizeof(pgdval_t) > sizeof(long))
1109 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd, 1188 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
1110 val, (u64)val >> 32); 1189 val, (u64)val >> 32);
1111 else 1190 else
1112 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd, 1191 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
1113 val); 1192 val);
1114 1193
1115 return (pgd_t) { ret }; 1194 return (pgd_t) { ret };
1116} 1195}
@@ -1120,11 +1199,11 @@ static inline pgdval_t pgd_val(pgd_t pgd)
1120 pgdval_t ret; 1199 pgdval_t ret;
1121 1200
1122 if (sizeof(pgdval_t) > sizeof(long)) 1201 if (sizeof(pgdval_t) > sizeof(long))
1123 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val, 1202 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
1124 pgd.pgd, (u64)pgd.pgd >> 32); 1203 pgd.pgd, (u64)pgd.pgd >> 32);
1125 else 1204 else
1126 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val, 1205 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
1127 pgd.pgd); 1206 pgd.pgd);
1128 1207
1129 return ret; 1208 return ret;
1130} 1209}
@@ -1188,11 +1267,11 @@ static inline pmd_t __pmd(pmdval_t val)
1188 pmdval_t ret; 1267 pmdval_t ret;
1189 1268
1190 if (sizeof(pmdval_t) > sizeof(long)) 1269 if (sizeof(pmdval_t) > sizeof(long))
1191 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd, 1270 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
1192 val, (u64)val >> 32); 1271 val, (u64)val >> 32);
1193 else 1272 else
1194 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd, 1273 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
1195 val); 1274 val);
1196 1275
1197 return (pmd_t) { ret }; 1276 return (pmd_t) { ret };
1198} 1277}
@@ -1202,11 +1281,11 @@ static inline pmdval_t pmd_val(pmd_t pmd)
1202 pmdval_t ret; 1281 pmdval_t ret;
1203 1282
1204 if (sizeof(pmdval_t) > sizeof(long)) 1283 if (sizeof(pmdval_t) > sizeof(long))
1205 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val, 1284 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
1206 pmd.pmd, (u64)pmd.pmd >> 32); 1285 pmd.pmd, (u64)pmd.pmd >> 32);
1207 else 1286 else
1208 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val, 1287 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
1209 pmd.pmd); 1288 pmd.pmd);
1210 1289
1211 return ret; 1290 return ret;
1212} 1291}
@@ -1228,11 +1307,11 @@ static inline pud_t __pud(pudval_t val)
1228 pudval_t ret; 1307 pudval_t ret;
1229 1308
1230 if (sizeof(pudval_t) > sizeof(long)) 1309 if (sizeof(pudval_t) > sizeof(long))
1231 ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud, 1310 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
1232 val, (u64)val >> 32); 1311 val, (u64)val >> 32);
1233 else 1312 else
1234 ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud, 1313 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
1235 val); 1314 val);
1236 1315
1237 return (pud_t) { ret }; 1316 return (pud_t) { ret };
1238} 1317}
@@ -1242,11 +1321,11 @@ static inline pudval_t pud_val(pud_t pud)
1242 pudval_t ret; 1321 pudval_t ret;
1243 1322
1244 if (sizeof(pudval_t) > sizeof(long)) 1323 if (sizeof(pudval_t) > sizeof(long))
1245 ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val, 1324 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
1246 pud.pud, (u64)pud.pud >> 32); 1325 pud.pud, (u64)pud.pud >> 32);
1247 else 1326 else
1248 ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val, 1327 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
1249 pud.pud); 1328 pud.pud);
1250 1329
1251 return ret; 1330 return ret;
1252} 1331}
@@ -1387,9 +1466,10 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1387} 1466}
1388 1467
1389void _paravirt_nop(void); 1468void _paravirt_nop(void);
1390#define paravirt_nop ((void *)_paravirt_nop) 1469u32 _paravirt_ident_32(u32);
1470u64 _paravirt_ident_64(u64);
1391 1471
1392void paravirt_use_bytelocks(void); 1472#define paravirt_nop ((void *)_paravirt_nop)
1393 1473
1394#ifdef CONFIG_SMP 1474#ifdef CONFIG_SMP
1395 1475
@@ -1439,12 +1519,37 @@ extern struct paravirt_patch_site __parainstructions[],
1439 __parainstructions_end[]; 1519 __parainstructions_end[];
1440 1520
1441#ifdef CONFIG_X86_32 1521#ifdef CONFIG_X86_32
1442#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;" 1522#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
1443#define PV_RESTORE_REGS "popl %%edx; popl %%ecx" 1523#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
1524
1525/* save and restore all caller-save registers, except return value */
1526#define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;"
1527#define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;"
1528
1444#define PV_FLAGS_ARG "0" 1529#define PV_FLAGS_ARG "0"
1445#define PV_EXTRA_CLOBBERS 1530#define PV_EXTRA_CLOBBERS
1446#define PV_VEXTRA_CLOBBERS 1531#define PV_VEXTRA_CLOBBERS
1447#else 1532#else
1533/* save and restore all caller-save registers, except return value */
1534#define PV_SAVE_ALL_CALLER_REGS \
1535 "push %rcx;" \
1536 "push %rdx;" \
1537 "push %rsi;" \
1538 "push %rdi;" \
1539 "push %r8;" \
1540 "push %r9;" \
1541 "push %r10;" \
1542 "push %r11;"
1543#define PV_RESTORE_ALL_CALLER_REGS \
1544 "pop %r11;" \
1545 "pop %r10;" \
1546 "pop %r9;" \
1547 "pop %r8;" \
1548 "pop %rdi;" \
1549 "pop %rsi;" \
1550 "pop %rdx;" \
1551 "pop %rcx;"
1552
1448/* We save some registers, but all of them, that's too much. We clobber all 1553/* We save some registers, but all of them, that's too much. We clobber all
1449 * caller saved registers but the argument parameter */ 1554 * caller saved registers but the argument parameter */
1450#define PV_SAVE_REGS "pushq %%rdi;" 1555#define PV_SAVE_REGS "pushq %%rdi;"
@@ -1454,52 +1559,76 @@ extern struct paravirt_patch_site __parainstructions[],
1454#define PV_FLAGS_ARG "D" 1559#define PV_FLAGS_ARG "D"
1455#endif 1560#endif
1456 1561
1562/*
1563 * Generate a thunk around a function which saves all caller-save
1564 * registers except for the return value. This allows C functions to
1565 * be called from assembler code where fewer than normal registers are
1566 * available. It may also help code generation around calls from C
1567 * code if the common case doesn't use many registers.
1568 *
1569 * When a callee is wrapped in a thunk, the caller can assume that all
1570 * arg regs and all scratch registers are preserved across the
1571 * call. The return value in rax/eax will not be saved, even for void
1572 * functions.
1573 */
1574#define PV_CALLEE_SAVE_REGS_THUNK(func) \
1575 extern typeof(func) __raw_callee_save_##func; \
1576 static void *__##func##__ __used = func; \
1577 \
1578 asm(".pushsection .text;" \
1579 "__raw_callee_save_" #func ": " \
1580 PV_SAVE_ALL_CALLER_REGS \
1581 "call " #func ";" \
1582 PV_RESTORE_ALL_CALLER_REGS \
1583 "ret;" \
1584 ".popsection")
1585
1586/* Get a reference to a callee-save function */
1587#define PV_CALLEE_SAVE(func) \
1588 ((struct paravirt_callee_save) { __raw_callee_save_##func })
1589
1590/* Promise that "func" already uses the right calling convention */
1591#define __PV_IS_CALLEE_SAVE(func) \
1592 ((struct paravirt_callee_save) { func })
1593
1457static inline unsigned long __raw_local_save_flags(void) 1594static inline unsigned long __raw_local_save_flags(void)
1458{ 1595{
1459 unsigned long f; 1596 unsigned long f;
1460 1597
1461 asm volatile(paravirt_alt(PV_SAVE_REGS 1598 asm volatile(paravirt_alt(PARAVIRT_CALL)
1462 PARAVIRT_CALL
1463 PV_RESTORE_REGS)
1464 : "=a"(f) 1599 : "=a"(f)
1465 : paravirt_type(pv_irq_ops.save_fl), 1600 : paravirt_type(pv_irq_ops.save_fl),
1466 paravirt_clobber(CLBR_EAX) 1601 paravirt_clobber(CLBR_EAX)
1467 : "memory", "cc" PV_VEXTRA_CLOBBERS); 1602 : "memory", "cc");
1468 return f; 1603 return f;
1469} 1604}
1470 1605
1471static inline void raw_local_irq_restore(unsigned long f) 1606static inline void raw_local_irq_restore(unsigned long f)
1472{ 1607{
1473 asm volatile(paravirt_alt(PV_SAVE_REGS 1608 asm volatile(paravirt_alt(PARAVIRT_CALL)
1474 PARAVIRT_CALL
1475 PV_RESTORE_REGS)
1476 : "=a"(f) 1609 : "=a"(f)
1477 : PV_FLAGS_ARG(f), 1610 : PV_FLAGS_ARG(f),
1478 paravirt_type(pv_irq_ops.restore_fl), 1611 paravirt_type(pv_irq_ops.restore_fl),
1479 paravirt_clobber(CLBR_EAX) 1612 paravirt_clobber(CLBR_EAX)
1480 : "memory", "cc" PV_EXTRA_CLOBBERS); 1613 : "memory", "cc");
1481} 1614}
1482 1615
1483static inline void raw_local_irq_disable(void) 1616static inline void raw_local_irq_disable(void)
1484{ 1617{
1485 asm volatile(paravirt_alt(PV_SAVE_REGS 1618 asm volatile(paravirt_alt(PARAVIRT_CALL)
1486 PARAVIRT_CALL
1487 PV_RESTORE_REGS)
1488 : 1619 :
1489 : paravirt_type(pv_irq_ops.irq_disable), 1620 : paravirt_type(pv_irq_ops.irq_disable),
1490 paravirt_clobber(CLBR_EAX) 1621 paravirt_clobber(CLBR_EAX)
1491 : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); 1622 : "memory", "eax", "cc");
1492} 1623}
1493 1624
1494static inline void raw_local_irq_enable(void) 1625static inline void raw_local_irq_enable(void)
1495{ 1626{
1496 asm volatile(paravirt_alt(PV_SAVE_REGS 1627 asm volatile(paravirt_alt(PARAVIRT_CALL)
1497 PARAVIRT_CALL
1498 PV_RESTORE_REGS)
1499 : 1628 :
1500 : paravirt_type(pv_irq_ops.irq_enable), 1629 : paravirt_type(pv_irq_ops.irq_enable),
1501 paravirt_clobber(CLBR_EAX) 1630 paravirt_clobber(CLBR_EAX)
1502 : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); 1631 : "memory", "eax", "cc");
1503} 1632}
1504 1633
1505static inline unsigned long __raw_local_irq_save(void) 1634static inline unsigned long __raw_local_irq_save(void)
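To make the PV_CALLEE_SAVE_REGS_THUNK comment above concrete: wrapping a C function produces an assembler stub that preserves every caller-save register except the return register. For a hypothetical my_save_fl on 64-bit, the macro emits roughly the following (paraphrased from PV_SAVE_ALL_CALLER_REGS / PV_RESTORE_ALL_CALLER_REGS above):

	/* Roughly what PV_CALLEE_SAVE_REGS_THUNK(my_save_fl) expands to: */
	asm(".pushsection .text;"
	    "__raw_callee_save_my_save_fl: "
	    "push %rcx; push %rdx; push %rsi; push %rdi;"
	    "push %r8;  push %r9;  push %r10; push %r11;"
	    "call my_save_fl;"			/* result stays in %rax */
	    "pop %r11; pop %r10; pop %r9;  pop %r8;"
	    "pop %rdi; pop %rsi; pop %rdx; pop %rcx;"
	    "ret;"
	    ".popsection");

PV_CALLEE_SAVE(my_save_fl) then wraps a pointer to this stub in a struct paravirt_callee_save for the ops tables.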
@@ -1542,33 +1671,49 @@ static inline unsigned long __raw_local_irq_save(void)
1542 .popsection 1671 .popsection
1543 1672
1544 1673
1674#define COND_PUSH(set, mask, reg) \
1675 .if ((~(set)) & mask); push %reg; .endif
1676#define COND_POP(set, mask, reg) \
1677 .if ((~(set)) & mask); pop %reg; .endif
1678
1545#ifdef CONFIG_X86_64 1679#ifdef CONFIG_X86_64
1546#define PV_SAVE_REGS \ 1680
1547 push %rax; \ 1681#define PV_SAVE_REGS(set) \
1548 push %rcx; \ 1682 COND_PUSH(set, CLBR_RAX, rax); \
1549 push %rdx; \ 1683 COND_PUSH(set, CLBR_RCX, rcx); \
1550 push %rsi; \ 1684 COND_PUSH(set, CLBR_RDX, rdx); \
1551 push %rdi; \ 1685 COND_PUSH(set, CLBR_RSI, rsi); \
1552 push %r8; \ 1686 COND_PUSH(set, CLBR_RDI, rdi); \
1553 push %r9; \ 1687 COND_PUSH(set, CLBR_R8, r8); \
1554 push %r10; \ 1688 COND_PUSH(set, CLBR_R9, r9); \
1555 push %r11 1689 COND_PUSH(set, CLBR_R10, r10); \
1556#define PV_RESTORE_REGS \ 1690 COND_PUSH(set, CLBR_R11, r11)
1557 pop %r11; \ 1691#define PV_RESTORE_REGS(set) \
1558 pop %r10; \ 1692 COND_POP(set, CLBR_R11, r11); \
1559 pop %r9; \ 1693 COND_POP(set, CLBR_R10, r10); \
1560 pop %r8; \ 1694 COND_POP(set, CLBR_R9, r9); \
1561 pop %rdi; \ 1695 COND_POP(set, CLBR_R8, r8); \
1562 pop %rsi; \ 1696 COND_POP(set, CLBR_RDI, rdi); \
1563 pop %rdx; \ 1697 COND_POP(set, CLBR_RSI, rsi); \
1564 pop %rcx; \ 1698 COND_POP(set, CLBR_RDX, rdx); \
1565 pop %rax 1699 COND_POP(set, CLBR_RCX, rcx); \
1700 COND_POP(set, CLBR_RAX, rax)
1701
1566#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8) 1702#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
1567#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8) 1703#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
1568#define PARA_INDIRECT(addr) *addr(%rip) 1704#define PARA_INDIRECT(addr) *addr(%rip)
1569#else 1705#else
1570#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx 1706#define PV_SAVE_REGS(set) \
1571#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax 1707 COND_PUSH(set, CLBR_EAX, eax); \
1708 COND_PUSH(set, CLBR_EDI, edi); \
1709 COND_PUSH(set, CLBR_ECX, ecx); \
1710 COND_PUSH(set, CLBR_EDX, edx)
1711#define PV_RESTORE_REGS(set) \
1712 COND_POP(set, CLBR_EDX, edx); \
1713 COND_POP(set, CLBR_ECX, ecx); \
1714 COND_POP(set, CLBR_EDI, edi); \
1715 COND_POP(set, CLBR_EAX, eax)
1716
1572#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) 1717#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
1573#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) 1718#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
1574#define PARA_INDIRECT(addr) *%cs:addr 1719#define PARA_INDIRECT(addr) *%cs:addr
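COND_PUSH reads a little backwards: a register is pushed only when its CLBR_ bit is *not* in the set, so the set names the registers a call site is willing to have clobbered. Working one 64-bit case through by hand (my arithmetic; DISABLE_INTERRUPTS/ENABLE_INTERRUPTS in the next hunk are the users):

	/*
	 * DISABLE_INTERRUPTS(CLBR_RDI) hands (CLBR_RDI | CLBR_CALLEE_SAVE) to
	 * PV_SAVE_REGS().  On 64-bit that set covers every register except RAX,
	 * so the call site boils down to just
	 *
	 *	push %rax
	 *	call *pv_irq_ops+PV_IRQ_irq_disable(%rip)
	 *	pop  %rax
	 *
	 * where the old fixed PV_SAVE_REGS/PV_RESTORE_REGS pair pushed and popped
	 * nine registers no matter what the clobber annotation said.
	 */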
@@ -1580,15 +1725,15 @@ static inline unsigned long __raw_local_irq_save(void)
1580 1725
1581#define DISABLE_INTERRUPTS(clobbers) \ 1726#define DISABLE_INTERRUPTS(clobbers) \
1582 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ 1727 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
1583 PV_SAVE_REGS; \ 1728 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
1584 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ 1729 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
1585 PV_RESTORE_REGS;) \ 1730 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1586 1731
1587#define ENABLE_INTERRUPTS(clobbers) \ 1732#define ENABLE_INTERRUPTS(clobbers) \
1588 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ 1733 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
1589 PV_SAVE_REGS; \ 1734 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
1590 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ 1735 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
1591 PV_RESTORE_REGS;) 1736 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1592 1737
1593#define USERGS_SYSRET32 \ 1738#define USERGS_SYSRET32 \
1594 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \ 1739 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
@@ -1618,11 +1763,15 @@ static inline unsigned long __raw_local_irq_save(void)
1618 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ 1763 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1619 swapgs) 1764 swapgs)
1620 1765
1766/*
1767 * Note: swapgs is very special, and in practice it is either
1768 * implemented as a single "swapgs" instruction or as something
1769 * equally special. Either way, we don't need to save any registers
1770 * for it.
1771 */
1621#define SWAPGS \ 1772#define SWAPGS \
1622 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ 1773 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1623 PV_SAVE_REGS; \ 1774 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
1624 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
1625 PV_RESTORE_REGS \
1626 ) 1775 )
1627 1776
1628#define GET_CR2_INTO_RCX \ 1777#define GET_CR2_INTO_RCX \
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index b8493b3b9890..9709fdff6615 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -5,10 +5,8 @@
5 5
6#ifdef CONFIG_X86_PAT 6#ifdef CONFIG_X86_PAT
7extern int pat_enabled; 7extern int pat_enabled;
8extern void validate_pat_support(struct cpuinfo_x86 *c);
9#else 8#else
10static const int pat_enabled; 9static const int pat_enabled;
11static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
12#endif 10#endif
13 11
14extern void pat_init(void); 12extern void pat_init(void);
@@ -17,6 +15,4 @@ extern int reserve_memtype(u64 start, u64 end,
17 unsigned long req_type, unsigned long *ret_type); 15 unsigned long req_type, unsigned long *ret_type);
18extern int free_memtype(u64 start, u64 end); 16extern int free_memtype(u64 start, u64 end);
19 17
20extern void pat_disable(char *reason);
21
22#endif /* _ASM_X86_PAT_H */ 18#endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/mach-default/pci-functions.h b/arch/x86/include/asm/pci-functions.h
index ed0bab427354..ed0bab427354 100644
--- a/arch/x86/include/asm/mach-default/pci-functions.h
+++ b/arch/x86/include/asm/pci-functions.h
diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h
deleted file mode 100644
index 2fbfff88df37..000000000000
--- a/arch/x86/include/asm/pda.h
+++ /dev/null
@@ -1,137 +0,0 @@
1#ifndef _ASM_X86_PDA_H
2#define _ASM_X86_PDA_H
3
4#ifndef __ASSEMBLY__
5#include <linux/stddef.h>
6#include <linux/types.h>
7#include <linux/cache.h>
8#include <asm/page.h>
9
10/* Per processor datastructure. %gs points to it while the kernel runs */
11struct x8664_pda {
12 struct task_struct *pcurrent; /* 0 Current process */
13 unsigned long data_offset; /* 8 Per cpu data offset from linker
14 address */
15 unsigned long kernelstack; /* 16 top of kernel stack for current */
16 unsigned long oldrsp; /* 24 user rsp for system call */
17 int irqcount; /* 32 Irq nesting counter. Starts -1 */
18 unsigned int cpunumber; /* 36 Logical CPU number */
19#ifdef CONFIG_CC_STACKPROTECTOR
20 unsigned long stack_canary; /* 40 stack canary value */
21 /* gcc-ABI: this canary MUST be at
22 offset 40!!! */
23#endif
24 char *irqstackptr;
25 short nodenumber; /* number of current node (32k max) */
26 short in_bootmem; /* pda lives in bootmem */
27 unsigned int __softirq_pending;
28 unsigned int __nmi_count; /* number of NMI on this CPUs */
29 short mmu_state;
30 short isidle;
31 struct mm_struct *active_mm;
32 unsigned apic_timer_irqs;
33 unsigned irq0_irqs;
34 unsigned irq_resched_count;
35 unsigned irq_call_count;
36 unsigned irq_tlb_count;
37 unsigned irq_thermal_count;
38 unsigned irq_threshold_count;
39 unsigned irq_spurious_count;
40} ____cacheline_aligned_in_smp;
41
42extern struct x8664_pda **_cpu_pda;
43extern void pda_init(int);
44
45#define cpu_pda(i) (_cpu_pda[i])
46
47/*
48 * There is no fast way to get the base address of the PDA, all the accesses
49 * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
50 */
51extern void __bad_pda_field(void) __attribute__((noreturn));
52
53/*
54 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
55 * all PDA accesses so it gets read/write dependencies right.
56 */
57extern struct x8664_pda _proxy_pda;
58
59#define pda_offset(field) offsetof(struct x8664_pda, field)
60
61#define pda_to_op(op, field, val) \
62do { \
63 typedef typeof(_proxy_pda.field) T__; \
64 if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
65 switch (sizeof(_proxy_pda.field)) { \
66 case 2: \
67 asm(op "w %1,%%gs:%c2" : \
68 "+m" (_proxy_pda.field) : \
69 "ri" ((T__)val), \
70 "i"(pda_offset(field))); \
71 break; \
72 case 4: \
73 asm(op "l %1,%%gs:%c2" : \
74 "+m" (_proxy_pda.field) : \
75 "ri" ((T__)val), \
76 "i" (pda_offset(field))); \
77 break; \
78 case 8: \
79 asm(op "q %1,%%gs:%c2": \
80 "+m" (_proxy_pda.field) : \
81 "ri" ((T__)val), \
82 "i"(pda_offset(field))); \
83 break; \
84 default: \
85 __bad_pda_field(); \
86 } \
87} while (0)
88
89#define pda_from_op(op, field) \
90({ \
91 typeof(_proxy_pda.field) ret__; \
92 switch (sizeof(_proxy_pda.field)) { \
93 case 2: \
94 asm(op "w %%gs:%c1,%0" : \
95 "=r" (ret__) : \
96 "i" (pda_offset(field)), \
97 "m" (_proxy_pda.field)); \
98 break; \
99 case 4: \
100 asm(op "l %%gs:%c1,%0": \
101 "=r" (ret__): \
102 "i" (pda_offset(field)), \
103 "m" (_proxy_pda.field)); \
104 break; \
105 case 8: \
106 asm(op "q %%gs:%c1,%0": \
107 "=r" (ret__) : \
108 "i" (pda_offset(field)), \
109 "m" (_proxy_pda.field)); \
110 break; \
111 default: \
112 __bad_pda_field(); \
113 } \
114 ret__; \
115})
116
117#define read_pda(field) pda_from_op("mov", field)
118#define write_pda(field, val) pda_to_op("mov", field, val)
119#define add_pda(field, val) pda_to_op("add", field, val)
120#define sub_pda(field, val) pda_to_op("sub", field, val)
121#define or_pda(field, val) pda_to_op("or", field, val)
122
123/* This is not atomic against other CPUs -- CPU preemption needs to be off */
124#define test_and_clear_bit_pda(bit, field) \
125({ \
126 int old__; \
127 asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
128 : "=r" (old__), "+m" (_proxy_pda.field) \
129 : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
130 old__; \
131})
132
133#endif
134
135#define PDA_STACKOFFSET (5*8)
136
137#endif /* _ASM_X86_PDA_H */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index ece72053ba63..0b64af4f13ac 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -2,53 +2,12 @@
2#define _ASM_X86_PERCPU_H 2#define _ASM_X86_PERCPU_H
3 3
4#ifdef CONFIG_X86_64 4#ifdef CONFIG_X86_64
5#include <linux/compiler.h> 5#define __percpu_seg gs
6 6#define __percpu_mov_op movq
7/* Same as asm-generic/percpu.h, except that we store the per cpu offset 7#else
8 in the PDA. Longer term the PDA and every per cpu variable 8#define __percpu_seg fs
9 should be just put into a single section and referenced directly 9#define __percpu_mov_op movl
10 from %gs */
11
12#ifdef CONFIG_SMP
13#include <asm/pda.h>
14
15#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
16#define __my_cpu_offset read_pda(data_offset)
17
18#define per_cpu_offset(x) (__per_cpu_offset(x))
19
20#endif 10#endif
21#include <asm-generic/percpu.h>
22
23DECLARE_PER_CPU(struct x8664_pda, pda);
24
25/*
26 * These are supposed to be implemented as a single instruction which
27 * operates on the per-cpu data base segment. x86-64 doesn't have
28 * that yet, so this is a fairly inefficient workaround for the
29 * meantime. The single instruction is atomic with respect to
30 * preemption and interrupts, so we need to explicitly disable
31 * interrupts here to achieve the same effect. However, because it
32 * can be used from within interrupt-disable/enable, we can't actually
33 * disable interrupts; disabling preemption is enough.
34 */
35#define x86_read_percpu(var) \
36 ({ \
37 typeof(per_cpu_var(var)) __tmp; \
38 preempt_disable(); \
39 __tmp = __get_cpu_var(var); \
40 preempt_enable(); \
41 __tmp; \
42 })
43
44#define x86_write_percpu(var, val) \
45 do { \
46 preempt_disable(); \
47 __get_cpu_var(var) = (val); \
48 preempt_enable(); \
49 } while(0)
50
51#else /* CONFIG_X86_64 */
52 11
53#ifdef __ASSEMBLY__ 12#ifdef __ASSEMBLY__
54 13
@@ -65,47 +24,26 @@ DECLARE_PER_CPU(struct x8664_pda, pda);
65 * PER_CPU(cpu_gdt_descr, %ebx) 24 * PER_CPU(cpu_gdt_descr, %ebx)
66 */ 25 */
67#ifdef CONFIG_SMP 26#ifdef CONFIG_SMP
68#define PER_CPU(var, reg) \ 27#define PER_CPU(var, reg) \
69 movl %fs:per_cpu__##this_cpu_off, reg; \ 28 __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \
70 lea per_cpu__##var(reg), reg 29 lea per_cpu__##var(reg), reg
71#define PER_CPU_VAR(var) %fs:per_cpu__##var 30#define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var
72#else /* ! SMP */ 31#else /* ! SMP */
73#define PER_CPU(var, reg) \ 32#define PER_CPU(var, reg) \
74 movl $per_cpu__##var, reg 33 __percpu_mov_op $per_cpu__##var, reg
75#define PER_CPU_VAR(var) per_cpu__##var 34#define PER_CPU_VAR(var) per_cpu__##var
76#endif /* SMP */ 35#endif /* SMP */
77 36
78#else /* ...!ASSEMBLY */ 37#else /* ...!ASSEMBLY */
79 38
80/* 39#include <linux/stringify.h>
81 * PER_CPU finds an address of a per-cpu variable.
82 *
83 * Args:
84 * var - variable name
85 * cpu - 32bit register containing the current CPU number
86 *
87 * The resulting address is stored in the "cpu" argument.
88 *
89 * Example:
90 * PER_CPU(cpu_gdt_descr, %ebx)
91 */
92#ifdef CONFIG_SMP
93
94#define __my_cpu_offset x86_read_percpu(this_cpu_off)
95 40
96/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */ 41#ifdef CONFIG_SMP
97#define __percpu_seg "%%fs:" 42#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
98 43#define __my_cpu_offset percpu_read(this_cpu_off)
99#else /* !SMP */ 44#else
100 45#define __percpu_arg(x) "%" #x
101#define __percpu_seg "" 46#endif
102
103#endif /* SMP */
104
105#include <asm-generic/percpu.h>
106
107/* We can use this directly for local CPU (faster). */
108DECLARE_PER_CPU(unsigned long, this_cpu_off);
109 47
110/* For arch-specific code, we can use direct single-insn ops (they 48/* For arch-specific code, we can use direct single-insn ops (they
111 * don't give an lvalue though). */ 49 * don't give an lvalue though). */
@@ -120,20 +58,25 @@ do { \
120 } \ 58 } \
121 switch (sizeof(var)) { \ 59 switch (sizeof(var)) { \
122 case 1: \ 60 case 1: \
123 asm(op "b %1,"__percpu_seg"%0" \ 61 asm(op "b %1,"__percpu_arg(0) \
124 : "+m" (var) \ 62 : "+m" (var) \
125 : "ri" ((T__)val)); \ 63 : "ri" ((T__)val)); \
126 break; \ 64 break; \
127 case 2: \ 65 case 2: \
128 asm(op "w %1,"__percpu_seg"%0" \ 66 asm(op "w %1,"__percpu_arg(0) \
129 : "+m" (var) \ 67 : "+m" (var) \
130 : "ri" ((T__)val)); \ 68 : "ri" ((T__)val)); \
131 break; \ 69 break; \
132 case 4: \ 70 case 4: \
133 asm(op "l %1,"__percpu_seg"%0" \ 71 asm(op "l %1,"__percpu_arg(0) \
134 : "+m" (var) \ 72 : "+m" (var) \
135 : "ri" ((T__)val)); \ 73 : "ri" ((T__)val)); \
136 break; \ 74 break; \
75 case 8: \
76 asm(op "q %1,"__percpu_arg(0) \
77 : "+m" (var) \
78 : "re" ((T__)val)); \
79 break; \
137 default: __bad_percpu_size(); \ 80 default: __bad_percpu_size(); \
138 } \ 81 } \
139} while (0) 82} while (0)
@@ -143,17 +86,22 @@ do { \
143 typeof(var) ret__; \ 86 typeof(var) ret__; \
144 switch (sizeof(var)) { \ 87 switch (sizeof(var)) { \
145 case 1: \ 88 case 1: \
146 asm(op "b "__percpu_seg"%1,%0" \ 89 asm(op "b "__percpu_arg(1)",%0" \
147 : "=r" (ret__) \ 90 : "=r" (ret__) \
148 : "m" (var)); \ 91 : "m" (var)); \
149 break; \ 92 break; \
150 case 2: \ 93 case 2: \
151 asm(op "w "__percpu_seg"%1,%0" \ 94 asm(op "w "__percpu_arg(1)",%0" \
152 : "=r" (ret__) \ 95 : "=r" (ret__) \
153 : "m" (var)); \ 96 : "m" (var)); \
154 break; \ 97 break; \
155 case 4: \ 98 case 4: \
156 asm(op "l "__percpu_seg"%1,%0" \ 99 asm(op "l "__percpu_arg(1)",%0" \
100 : "=r" (ret__) \
101 : "m" (var)); \
102 break; \
103 case 8: \
104 asm(op "q "__percpu_arg(1)",%0" \
157 : "=r" (ret__) \ 105 : "=r" (ret__) \
158 : "m" (var)); \ 106 : "m" (var)); \
159 break; \ 107 break; \
@@ -162,13 +110,30 @@ do { \
162 ret__; \ 110 ret__; \
163}) 111})
164 112
165#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var) 113#define percpu_read(var) percpu_from_op("mov", per_cpu__##var)
166#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val) 114#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
167#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val) 115#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val)
168#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val) 116#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val)
169#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val) 117#define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val)
118#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
119#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
120
121/* This is not atomic against other CPUs -- CPU preemption needs to be off */
122#define x86_test_and_clear_bit_percpu(bit, var) \
123({ \
124 int old__; \
125 asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
126 : "=r" (old__), "+m" (per_cpu__##var) \
127 : "dIr" (bit)); \
128 old__; \
129})
130
131#include <asm-generic/percpu.h>
132
133/* We can use this directly for local CPU (faster). */
134DECLARE_PER_CPU(unsigned long, this_cpu_off);
135
170#endif /* !__ASSEMBLY__ */ 136#endif /* !__ASSEMBLY__ */
171#endif /* !CONFIG_X86_64 */
172 137
173#ifdef CONFIG_SMP 138#ifdef CONFIG_SMP
174 139
@@ -195,9 +160,9 @@ do { \
195#define early_per_cpu_ptr(_name) (_name##_early_ptr) 160#define early_per_cpu_ptr(_name) (_name##_early_ptr)
196#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx]) 161#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
197#define early_per_cpu(_name, _cpu) \ 162#define early_per_cpu(_name, _cpu) \
198 (early_per_cpu_ptr(_name) ? \ 163 *(early_per_cpu_ptr(_name) ? \
199 early_per_cpu_ptr(_name)[_cpu] : \ 164 &early_per_cpu_ptr(_name)[_cpu] : \
200 per_cpu(_name, _cpu)) 165 &per_cpu(_name, _cpu))
201 166
202#else /* !CONFIG_SMP */ 167#else /* !CONFIG_SMP */
203#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ 168#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
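Note on the percpu.h hunks above: the former 32-bit-only x86_read_percpu()/x86_write_percpu() helpers and the 64-bit pda accessors are folded into one family of percpu_read()/percpu_write()/percpu_add()/percpu_or() operations. __percpu_arg() stringifies __percpu_seg, so the same inline asm emits %fs:-relative accesses on 32-bit and %gs:-relative accesses on 64-bit, and the new "case 8" arm covers 64-bit quantities. A minimal usage sketch follows; the variable and the preempt_disable() bracketing are illustrative assumptions, not code from this patch.

#include <linux/percpu.h>
#include <linux/preempt.h>

/* hypothetical per-cpu counter, not a real kernel symbol */
DEFINE_PER_CPU(unsigned long, demo_counter);

static void bump_demo_counter(void)
{
	preempt_disable();			/* stay on one CPU while touching its slot  */
	percpu_add(demo_counter, 1);		/* one add instruction against %fs:/%gs:    */
	if (percpu_read(demo_counter) > 1024)
		percpu_write(demo_counter, 0);
	preempt_enable();
}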
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 4f5af8447d54..6f7c102018bf 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -240,64 +240,78 @@ static inline int pmd_large(pmd_t pte)
240 (_PAGE_PSE | _PAGE_PRESENT); 240 (_PAGE_PSE | _PAGE_PRESENT);
241} 241}
242 242
243static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
244{
245 pteval_t v = native_pte_val(pte);
246
247 return native_make_pte(v | set);
248}
249
250static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
251{
252 pteval_t v = native_pte_val(pte);
253
254 return native_make_pte(v & ~clear);
255}
256
243static inline pte_t pte_mkclean(pte_t pte) 257static inline pte_t pte_mkclean(pte_t pte)
244{ 258{
245 return __pte(pte_val(pte) & ~_PAGE_DIRTY); 259 return pte_clear_flags(pte, _PAGE_DIRTY);
246} 260}
247 261
248static inline pte_t pte_mkold(pte_t pte) 262static inline pte_t pte_mkold(pte_t pte)
249{ 263{
250 return __pte(pte_val(pte) & ~_PAGE_ACCESSED); 264 return pte_clear_flags(pte, _PAGE_ACCESSED);
251} 265}
252 266
253static inline pte_t pte_wrprotect(pte_t pte) 267static inline pte_t pte_wrprotect(pte_t pte)
254{ 268{
255 return __pte(pte_val(pte) & ~_PAGE_RW); 269 return pte_clear_flags(pte, _PAGE_RW);
256} 270}
257 271
258static inline pte_t pte_mkexec(pte_t pte) 272static inline pte_t pte_mkexec(pte_t pte)
259{ 273{
260 return __pte(pte_val(pte) & ~_PAGE_NX); 274 return pte_clear_flags(pte, _PAGE_NX);
261} 275}
262 276
263static inline pte_t pte_mkdirty(pte_t pte) 277static inline pte_t pte_mkdirty(pte_t pte)
264{ 278{
265 return __pte(pte_val(pte) | _PAGE_DIRTY); 279 return pte_set_flags(pte, _PAGE_DIRTY);
266} 280}
267 281
268static inline pte_t pte_mkyoung(pte_t pte) 282static inline pte_t pte_mkyoung(pte_t pte)
269{ 283{
270 return __pte(pte_val(pte) | _PAGE_ACCESSED); 284 return pte_set_flags(pte, _PAGE_ACCESSED);
271} 285}
272 286
273static inline pte_t pte_mkwrite(pte_t pte) 287static inline pte_t pte_mkwrite(pte_t pte)
274{ 288{
275 return __pte(pte_val(pte) | _PAGE_RW); 289 return pte_set_flags(pte, _PAGE_RW);
276} 290}
277 291
278static inline pte_t pte_mkhuge(pte_t pte) 292static inline pte_t pte_mkhuge(pte_t pte)
279{ 293{
280 return __pte(pte_val(pte) | _PAGE_PSE); 294 return pte_set_flags(pte, _PAGE_PSE);
281} 295}
282 296
283static inline pte_t pte_clrhuge(pte_t pte) 297static inline pte_t pte_clrhuge(pte_t pte)
284{ 298{
285 return __pte(pte_val(pte) & ~_PAGE_PSE); 299 return pte_clear_flags(pte, _PAGE_PSE);
286} 300}
287 301
288static inline pte_t pte_mkglobal(pte_t pte) 302static inline pte_t pte_mkglobal(pte_t pte)
289{ 303{
290 return __pte(pte_val(pte) | _PAGE_GLOBAL); 304 return pte_set_flags(pte, _PAGE_GLOBAL);
291} 305}
292 306
293static inline pte_t pte_clrglobal(pte_t pte) 307static inline pte_t pte_clrglobal(pte_t pte)
294{ 308{
295 return __pte(pte_val(pte) & ~_PAGE_GLOBAL); 309 return pte_clear_flags(pte, _PAGE_GLOBAL);
296} 310}
297 311
298static inline pte_t pte_mkspecial(pte_t pte) 312static inline pte_t pte_mkspecial(pte_t pte)
299{ 313{
300 return __pte(pte_val(pte) | _PAGE_SPECIAL); 314 return pte_set_flags(pte, _PAGE_SPECIAL);
301} 315}
302 316
303extern pteval_t __supported_pte_mask; 317extern pteval_t __supported_pte_mask;
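Note on the pgtable.h hunk above: the open-coded __pte(pte_val(pte) | flag) and __pte(pte_val(pte) & ~flag) bodies are replaced by two shared primitives, pte_set_flags() and pte_clear_flags(), built on native_pte_val()/native_make_pte(); presumably this keeps plain flag twiddling out of the paravirt pte conversion path while leaving every pte_mk*/pte_clr* helper behaviourally identical. A small illustrative equivalence (not patch code):

/* both return a pte with _PAGE_DIRTY set; only the plumbing differs */
static inline pte_t mark_dirty_old_way(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);	/* pre-patch, open-coded              */
}

static inline pte_t mark_dirty_new_way(pte_t pte)
{
	return pte_mkdirty(pte);			/* now pte_set_flags(pte, _PAGE_DIRTY) */
}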
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index ba09289accaa..1df9637dfda3 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -11,7 +11,6 @@
11#include <asm/processor.h> 11#include <asm/processor.h>
12#include <linux/bitops.h> 12#include <linux/bitops.h>
13#include <linux/threads.h> 13#include <linux/threads.h>
14#include <asm/pda.h>
15 14
16extern pud_t level3_kernel_pgt[512]; 15extern pud_t level3_kernel_pgt[512];
17extern pud_t level3_ident_pgt[512]; 16extern pud_t level3_ident_pgt[512];
diff --git a/arch/x86/include/asm/prctl.h b/arch/x86/include/asm/prctl.h
index a8894647dd9a..3ac5032fae09 100644
--- a/arch/x86/include/asm/prctl.h
+++ b/arch/x86/include/asm/prctl.h
@@ -6,8 +6,4 @@
6#define ARCH_GET_FS 0x1003 6#define ARCH_GET_FS 0x1003
7#define ARCH_GET_GS 0x1004 7#define ARCH_GET_GS 0x1004
8 8
9#ifdef CONFIG_X86_64
10extern long sys_arch_prctl(int, unsigned long);
11#endif /* CONFIG_X86_64 */
12
13#endif /* _ASM_X86_PRCTL_H */ 9#endif /* _ASM_X86_PRCTL_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 3bfd5235a9eb..a6643f68fbb1 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -73,7 +73,7 @@ struct cpuinfo_x86 {
73 char pad0; 73 char pad0;
74#else 74#else
75 /* Number of 4K pages in DTLB/ITLB combined(in pages): */ 75 /* Number of 4K pages in DTLB/ITLB combined(in pages): */
76 int x86_tlbsize; 76 int x86_tlbsize;
77 __u8 x86_virt_bits; 77 __u8 x86_virt_bits;
78 __u8 x86_phys_bits; 78 __u8 x86_phys_bits;
79#endif 79#endif
@@ -378,6 +378,22 @@ union thread_xstate {
378 378
379#ifdef CONFIG_X86_64 379#ifdef CONFIG_X86_64
380DECLARE_PER_CPU(struct orig_ist, orig_ist); 380DECLARE_PER_CPU(struct orig_ist, orig_ist);
381
382union irq_stack_union {
383 char irq_stack[IRQ_STACK_SIZE];
384 /*
385 * GCC hardcodes the stack canary as %gs:40. Since the
386 * irq_stack is the object at %gs:0, we reserve the bottom
387 * 48 bytes of the irq stack for the canary.
388 */
389 struct {
390 char gs_base[40];
391 unsigned long stack_canary;
392 };
393};
394
395DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
396DECLARE_PER_CPU(char *, irq_stack_ptr);
381#endif 397#endif
382 398
383extern void print_cpu_info(struct cpuinfo_x86 *); 399extern void print_cpu_info(struct cpuinfo_x86 *);
@@ -752,9 +768,9 @@ extern int sysenter_setup(void);
752extern struct desc_ptr early_gdt_descr; 768extern struct desc_ptr early_gdt_descr;
753 769
754extern void cpu_set_gdt(int); 770extern void cpu_set_gdt(int);
755extern void switch_to_new_gdt(void); 771extern void switch_to_new_gdt(int);
772extern void load_percpu_segment(int);
756extern void cpu_init(void); 773extern void cpu_init(void);
757extern void init_gdt(int cpu);
758 774
759static inline unsigned long get_debugctlmsr(void) 775static inline unsigned long get_debugctlmsr(void)
760{ 776{
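Note on the irq_stack_union added to processor.h: gcc's x86-64 stack-protector ABI always fetches the canary from %gs:40, while this series points %gs at the per-cpu area and arranges for irq_stack_union to be its first object; overlaying the IRQ stack with a 40-byte gs_base pad therefore lands stack_canary exactly at that fixed offset. A stand-alone sketch of the layout check (the IRQ_STACK_SIZE value here is an assumption, for illustration only):

#include <stddef.h>

#define IRQ_STACK_SIZE (4 * 4096)		/* assumed size, illustration only */

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	struct {
		char gs_base[40];		/* bytes 0..39 remain usable stack */
		unsigned long stack_canary;	/* byte 40 onward == %gs:40        */
	};
};

/* mirrors the BUILD_BUG_ON() added in stackprotector.h further below */
_Static_assert(offsetof(union irq_stack_union, stack_canary) == 40,
	       "canary must sit at %gs:40 for gcc -fstack-protector");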
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index d6a22f92ba77..49fb3ecf3bb3 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -18,11 +18,7 @@ extern void syscall32_cpu_init(void);
18 18
19extern void check_efer(void); 19extern void check_efer(void);
20 20
21#ifdef CONFIG_X86_BIOS_REBOOT
22extern int reboot_force; 21extern int reboot_force;
23#else
24static const int reboot_force = 0;
25#endif
26 22
27long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); 23long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
28 24
diff --git a/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h b/arch/x86/include/asm/rdc321x_defs.h
index c8e9c8bed3d0..c8e9c8bed3d0 100644
--- a/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h
+++ b/arch/x86/include/asm/rdc321x_defs.h
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ebe858cdc8a3..45b40278b582 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_X86_SETUP_H 1#ifndef _ASM_X86_SETUP_H
2#define _ASM_X86_SETUP_H 2#define _ASM_X86_SETUP_H
3 3
4#ifdef __KERNEL__
5
4#define COMMAND_LINE_SIZE 2048 6#define COMMAND_LINE_SIZE 2048
5 7
6#ifndef __ASSEMBLY__ 8#ifndef __ASSEMBLY__
@@ -8,10 +10,8 @@
8/* Interrupt control for vSMPowered x86_64 systems */ 10/* Interrupt control for vSMPowered x86_64 systems */
9void vsmp_init(void); 11void vsmp_init(void);
10 12
11
12void setup_bios_corruption_check(void); 13void setup_bios_corruption_check(void);
13 14
14
15#ifdef CONFIG_X86_VISWS 15#ifdef CONFIG_X86_VISWS
16extern void visws_early_detect(void); 16extern void visws_early_detect(void);
17extern int is_visws_box(void); 17extern int is_visws_box(void);
@@ -43,7 +43,7 @@ struct x86_quirks {
43 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); 43 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
44 void (*mpc_oem_pci_bus)(struct mpc_bus *m); 44 void (*mpc_oem_pci_bus)(struct mpc_bus *m);
45 void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable, 45 void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
46 unsigned short oemsize); 46 unsigned short oemsize);
47 int (*setup_ioapic_ids)(void); 47 int (*setup_ioapic_ids)(void);
48 int (*update_genapic)(void); 48 int (*update_genapic)(void);
49}; 49};
@@ -56,8 +56,6 @@ extern unsigned long saved_video_mode;
56#endif 56#endif
57#endif /* __ASSEMBLY__ */ 57#endif /* __ASSEMBLY__ */
58 58
59#ifdef __KERNEL__
60
61#ifdef __i386__ 59#ifdef __i386__
62 60
63#include <linux/pfn.h> 61#include <linux/pfn.h>
@@ -100,7 +98,6 @@ extern unsigned long init_pg_tables_start;
100extern unsigned long init_pg_tables_end; 98extern unsigned long init_pg_tables_end;
101 99
102#else 100#else
103void __init x86_64_init_pda(void);
104void __init x86_64_start_kernel(char *real_mode); 101void __init x86_64_start_kernel(char *real_mode);
105void __init x86_64_start_reservations(char *real_mode_data); 102void __init x86_64_start_reservations(char *real_mode_data);
106 103
diff --git a/arch/x86/include/asm/mach-default/setup_arch.h b/arch/x86/include/asm/setup_arch.h
index 38846208b548..38846208b548 100644
--- a/arch/x86/include/asm/mach-default/setup_arch.h
+++ b/arch/x86/include/asm/setup_arch.h
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 19953df61c52..47d0e21f2b9e 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -15,34 +15,8 @@
15# include <asm/io_apic.h> 15# include <asm/io_apic.h>
16# endif 16# endif
17#endif 17#endif
18#include <asm/pda.h>
19#include <asm/thread_info.h> 18#include <asm/thread_info.h>
20 19#include <asm/cpumask.h>
21#ifdef CONFIG_X86_64
22
23extern cpumask_var_t cpu_callin_mask;
24extern cpumask_var_t cpu_callout_mask;
25extern cpumask_var_t cpu_initialized_mask;
26extern cpumask_var_t cpu_sibling_setup_mask;
27
28#else /* CONFIG_X86_32 */
29
30extern cpumask_t cpu_callin_map;
31extern cpumask_t cpu_callout_map;
32extern cpumask_t cpu_initialized;
33extern cpumask_t cpu_sibling_setup_map;
34
35#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
36#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
37#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
38#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
39
40#endif /* CONFIG_X86_32 */
41
42extern void (*mtrr_hook)(void);
43extern void zap_low_mappings(void);
44
45extern int __cpuinit get_local_pda(int cpu);
46 20
47extern int smp_num_siblings; 21extern int smp_num_siblings;
48extern unsigned int num_processors; 22extern unsigned int num_processors;
@@ -50,9 +24,7 @@ extern unsigned int num_processors;
50DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 24DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
51DECLARE_PER_CPU(cpumask_t, cpu_core_map); 25DECLARE_PER_CPU(cpumask_t, cpu_core_map);
52DECLARE_PER_CPU(u16, cpu_llc_id); 26DECLARE_PER_CPU(u16, cpu_llc_id);
53#ifdef CONFIG_X86_32
54DECLARE_PER_CPU(int, cpu_number); 27DECLARE_PER_CPU(int, cpu_number);
55#endif
56 28
57static inline struct cpumask *cpu_sibling_mask(int cpu) 29static inline struct cpumask *cpu_sibling_mask(int cpu)
58{ 30{
@@ -167,8 +139,6 @@ void play_dead_common(void);
167void native_send_call_func_ipi(const struct cpumask *mask); 139void native_send_call_func_ipi(const struct cpumask *mask);
168void native_send_call_func_single_ipi(int cpu); 140void native_send_call_func_single_ipi(int cpu);
169 141
170extern void prefill_possible_map(void);
171
172void smp_store_cpu_info(int id); 142void smp_store_cpu_info(int id);
173#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) 143#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
174 144
@@ -177,10 +147,6 @@ static inline int num_booting_cpus(void)
177{ 147{
178 return cpumask_weight(cpu_callout_mask); 148 return cpumask_weight(cpu_callout_mask);
179} 149}
180#else
181static inline void prefill_possible_map(void)
182{
183}
184#endif /* CONFIG_SMP */ 150#endif /* CONFIG_SMP */
185 151
186extern unsigned disabled_cpus __cpuinitdata; 152extern unsigned disabled_cpus __cpuinitdata;
@@ -191,11 +157,11 @@ extern unsigned disabled_cpus __cpuinitdata;
191 * from the initial startup. We map APIC_BASE very early in page_setup(), 157 * from the initial startup. We map APIC_BASE very early in page_setup(),
192 * so this is correct in the x86 case. 158 * so this is correct in the x86 case.
193 */ 159 */
194#define raw_smp_processor_id() (x86_read_percpu(cpu_number)) 160#define raw_smp_processor_id() (percpu_read(cpu_number))
195extern int safe_smp_processor_id(void); 161extern int safe_smp_processor_id(void);
196 162
197#elif defined(CONFIG_X86_64_SMP) 163#elif defined(CONFIG_X86_64_SMP)
198#define raw_smp_processor_id() read_pda(cpunumber) 164#define raw_smp_processor_id() (percpu_read(cpu_number))
199 165
200#define stack_smp_processor_id() \ 166#define stack_smp_processor_id() \
201({ \ 167({ \
@@ -205,10 +171,6 @@ extern int safe_smp_processor_id(void);
205}) 171})
206#define safe_smp_processor_id() smp_processor_id() 172#define safe_smp_processor_id() smp_processor_id()
207 173
208#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
209#define cpu_physical_id(cpu) boot_cpu_physical_apicid
210#define safe_smp_processor_id() 0
211#define stack_smp_processor_id() 0
212#endif 174#endif
213 175
214#ifdef CONFIG_X86_LOCAL_APIC 176#ifdef CONFIG_X86_LOCAL_APIC
@@ -220,28 +182,9 @@ static inline int logical_smp_processor_id(void)
220 return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); 182 return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
221} 183}
222 184
223#include <mach_apicdef.h>
224static inline unsigned int read_apic_id(void)
225{
226 unsigned int reg;
227
228 reg = *(u32 *)(APIC_BASE + APIC_ID);
229
230 return GET_APIC_ID(reg);
231}
232#endif 185#endif
233 186
234
235# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
236extern int hard_smp_processor_id(void); 187extern int hard_smp_processor_id(void);
237# else
238#include <mach_apicdef.h>
239static inline int hard_smp_processor_id(void)
240{
241 /* we don't want to mark this access volatile - bad code generation */
242 return read_apic_id();
243}
244# endif /* APIC_DEFINITION */
245 188
246#else /* CONFIG_X86_LOCAL_APIC */ 189#else /* CONFIG_X86_LOCAL_APIC */
247 190
@@ -251,11 +194,5 @@ static inline int hard_smp_processor_id(void)
251 194
252#endif /* CONFIG_X86_LOCAL_APIC */ 195#endif /* CONFIG_X86_LOCAL_APIC */
253 196
254#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
255extern unsigned char boot_cpu_id;
256#else
257#define boot_cpu_id 0
258#endif
259
260#endif /* __ASSEMBLY__ */ 197#endif /* __ASSEMBLY__ */
261#endif /* _ASM_X86_SMP_H */ 198#endif /* _ASM_X86_SMP_H */
diff --git a/arch/x86/include/asm/mach-default/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 23bf52103b89..1def60114906 100644
--- a/arch/x86/include/asm/mach-default/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -13,10 +13,10 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
13 CMOS_WRITE(0xa, 0xf); 13 CMOS_WRITE(0xa, 0xf);
14 local_flush_tlb(); 14 local_flush_tlb();
15 pr_debug("1.\n"); 15 pr_debug("1.\n");
16 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = 16 *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) =
17 start_eip >> 4; 17 start_eip >> 4;
18 pr_debug("2.\n"); 18 pr_debug("2.\n");
19 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 19 *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_low)) =
20 start_eip & 0xf; 20 start_eip & 0xf;
21 pr_debug("3.\n"); 21 pr_debug("3.\n");
22} 22}
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
34 */ 34 */
35 CMOS_WRITE(0, 0xf); 35 CMOS_WRITE(0, 0xf);
36 36
37 *((volatile long *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0; 37 *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
38} 38}
39 39
40static inline void __init smpboot_setup_io_apic(void) 40static inline void __init smpboot_setup_io_apic(void)
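Note on the smpboot_hooks.h hunk above: smpboot_setup_warm_reset_vector() now reads the trampoline vector addresses from the apic driver (apic->trampoline_phys_high/low) instead of the TRAMPOLINE_PHYS_* constants, but the arithmetic is unchanged: the real-mode entry point is stored as a segment-style high word (start_eip >> 4) and a 4-bit offset (start_eip & 0xf), conventionally at the BIOS warm-reset vector. A worked example, using the TRAMPOLINE_BASE of 0x6000 defined later in trampoline.h:

unsigned long start_eip = 0x6000;	/* TRAMPOLINE_BASE                           */
unsigned short seg = start_eip >> 4;	/* 0x0600, written via trampoline_phys_high  */
unsigned short off = start_eip & 0xf;	/* 0x0000, written via trampoline_phys_low   */
/* the AP resumes at seg:off = 0600:0000, i.e. physical address 0x6000 */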
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 8247e94ac6b1..3a5696656680 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -172,70 +172,8 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
172 return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1; 172 return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
173} 173}
174 174
175#ifdef CONFIG_PARAVIRT 175#ifndef CONFIG_PARAVIRT
176/*
177 * Define virtualization-friendly old-style lock byte lock, for use in
178 * pv_lock_ops if desired.
179 *
180 * This differs from the pre-2.6.24 spinlock by always using xchgb
181 * rather than decb to take the lock; this allows it to use a
182 * zero-initialized lock structure. It also maintains a 1-byte
183 * contention counter, so that we can implement
184 * __byte_spin_is_contended.
185 */
186struct __byte_spinlock {
187 s8 lock;
188 s8 spinners;
189};
190
191static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
192{
193 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
194 return bl->lock != 0;
195}
196
197static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
198{
199 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
200 return bl->spinners != 0;
201}
202
203static inline void __byte_spin_lock(raw_spinlock_t *lock)
204{
205 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
206 s8 val = 1;
207
208 asm("1: xchgb %1, %0\n"
209 " test %1,%1\n"
210 " jz 3f\n"
211 " " LOCK_PREFIX "incb %2\n"
212 "2: rep;nop\n"
213 " cmpb $1, %0\n"
214 " je 2b\n"
215 " " LOCK_PREFIX "decb %2\n"
216 " jmp 1b\n"
217 "3:"
218 : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
219}
220
221static inline int __byte_spin_trylock(raw_spinlock_t *lock)
222{
223 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
224 u8 old = 1;
225
226 asm("xchgb %1,%0"
227 : "+m" (bl->lock), "+q" (old) : : "memory");
228 176
229 return old == 0;
230}
231
232static inline void __byte_spin_unlock(raw_spinlock_t *lock)
233{
234 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
235 smp_wmb();
236 bl->lock = 0;
237}
238#else /* !CONFIG_PARAVIRT */
239static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 177static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
240{ 178{
241 return __ticket_spin_is_locked(lock); 179 return __ticket_spin_is_locked(lock);
@@ -268,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
268 __raw_spin_lock(lock); 206 __raw_spin_lock(lock);
269} 207}
270 208
271#endif /* CONFIG_PARAVIRT */ 209#endif
272 210
273static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) 211static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
274{ 212{
@@ -330,8 +268,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
330{ 268{
331 atomic_t *count = (atomic_t *)lock; 269 atomic_t *count = (atomic_t *)lock;
332 270
333 atomic_dec(count); 271 if (atomic_dec_return(count) >= 0)
334 if (atomic_read(count) >= 0)
335 return 1; 272 return 1;
336 atomic_inc(count); 273 atomic_inc(count);
337 return 0; 274 return 0;
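Note on the last spinlock.h hunk: __raw_read_trylock() used to decrement the reader count and then re-read it as a separate operation, so the test could be based on a value other CPUs had already changed again; atomic_dec_return() performs one locked read-modify-write and hands back the exact value this decrement produced. Sketch of the difference (fragment, illustrative only):

/* before: decrement, then an independent re-read of the counter */
atomic_dec(count);
ok = (atomic_read(count) >= 0);

/* after: the decision uses the value produced by this CPU's decrement */
ok = (atomic_dec_return(count) >= 0);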
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
new file mode 100644
index 000000000000..36a700acaf2b
--- /dev/null
+++ b/arch/x86/include/asm/stackprotector.h
@@ -0,0 +1,38 @@
1#ifndef _ASM_STACKPROTECTOR_H
2#define _ASM_STACKPROTECTOR_H 1
3
4#include <asm/tsc.h>
5#include <asm/processor.h>
6
7/*
8 * Initialize the stackprotector canary value.
9 *
10 * NOTE: this must only be called from functions that never return,
11 * and it must always be inlined.
12 */
13static __always_inline void boot_init_stack_canary(void)
14{
15 u64 canary;
16 u64 tsc;
17
18 /*
19 * Build time only check to make sure the stack_canary is at
20 * offset 40 in the pda; this is a gcc ABI requirement
21 */
22 BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
23
24 /*
25 * We both use the random pool and the current TSC as a source
26 * of randomness. The TSC only matters for very early init,
27 * there it already has some randomness on most systems. Later
28 * on during the bootup the random pool has true entropy too.
29 */
30 get_random_bytes(&canary, sizeof(canary));
31 tsc = __native_read_tsc();
32 canary += tsc + (tsc << 32UL);
33
34 current->stack_canary = canary;
35 percpu_write(irq_stack_union.stack_canary, canary);
36}
37
38#endif
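Note on the new stackprotector.h: boot_init_stack_canary() seeds the canary from get_random_bytes() and then folds in the TSC, since the random pool may hold little entropy this early in boot; the low TSC bits perturb the low half directly and are also shifted into the high half so both halves vary across boots. It must be inlined into a function that never returns because it rewrites the live canary behind %gs:40. A tiny stand-alone sketch of the mixing step (values and the helper name are made up):

#include <stdint.h>

/* illustrative only: how the boot-time canary value is derived */
static uint64_t mix_canary(uint64_t random_bytes, uint64_t tsc)
{
	/* tsc feeds the low half directly and the high half via the shift */
	return random_bytes + tsc + (tsc << 32);
}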
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
deleted file mode 100644
index 93d2c8667cfe..000000000000
--- a/arch/x86/include/asm/summit/apic.h
+++ /dev/null
@@ -1,202 +0,0 @@
1#ifndef __ASM_SUMMIT_APIC_H
2#define __ASM_SUMMIT_APIC_H
3
4#include <asm/smp.h>
5#include <linux/gfp.h>
6
7#define esr_disable (1)
8#define NO_BALANCE_IRQ (0)
9
10/* In clustered mode, the high nibble of APIC ID is a cluster number.
11 * The low nibble is a 4-bit bitmap. */
12#define XAPIC_DEST_CPUS_SHIFT 4
13#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
14#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
15
16#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
17
18static inline const cpumask_t *target_cpus(void)
19{
20 /* CPU_MASK_ALL (0xff) has undefined behaviour with
21 * dest_LowestPrio mode logical clustered apic interrupt routing
22 * Just start on cpu 0. IRQ balancing will spread load
23 */
24 return &cpumask_of_cpu(0);
25}
26
27#define INT_DELIVERY_MODE (dest_LowestPrio)
28#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
29
30static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
31{
32 return 0;
33}
34
35/* we don't use the phys_cpu_present_map to indicate apicid presence */
36static inline unsigned long check_apicid_present(int bit)
37{
38 return 1;
39}
40
41#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
42
43extern u8 cpu_2_logical_apicid[];
44
45static inline void init_apic_ldr(void)
46{
47 unsigned long val, id;
48 int count = 0;
49 u8 my_id = (u8)hard_smp_processor_id();
50 u8 my_cluster = (u8)apicid_cluster(my_id);
51#ifdef CONFIG_SMP
52 u8 lid;
53 int i;
54
55 /* Create logical APIC IDs by counting CPUs already in cluster. */
56 for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
57 lid = cpu_2_logical_apicid[i];
58 if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
59 ++count;
60 }
61#endif
62 /* We only have a 4 wide bitmap in cluster mode. If a deranged
63 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
64 BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
65 id = my_cluster | (1UL << count);
66 apic_write(APIC_DFR, APIC_DFR_VALUE);
67 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
68 val |= SET_APIC_LOGICAL_ID(id);
69 apic_write(APIC_LDR, val);
70}
71
72static inline int multi_timer_check(int apic, int irq)
73{
74 return 0;
75}
76
77static inline int apic_id_registered(void)
78{
79 return 1;
80}
81
82static inline void setup_apic_routing(void)
83{
84 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
85 nr_ioapics);
86}
87
88static inline int apicid_to_node(int logical_apicid)
89{
90#ifdef CONFIG_SMP
91 return apicid_2_node[hard_smp_processor_id()];
92#else
93 return 0;
94#endif
95}
96
97/* Mapping from cpu number to logical apicid */
98static inline int cpu_to_logical_apicid(int cpu)
99{
100#ifdef CONFIG_SMP
101 if (cpu >= nr_cpu_ids)
102 return BAD_APICID;
103 return (int)cpu_2_logical_apicid[cpu];
104#else
105 return logical_smp_processor_id();
106#endif
107}
108
109static inline int cpu_present_to_apicid(int mps_cpu)
110{
111 if (mps_cpu < nr_cpu_ids)
112 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
113 else
114 return BAD_APICID;
115}
116
117static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
118{
119 /* For clustered we don't have a good way to do this yet - hack */
120 return physids_promote(0x0F);
121}
122
123static inline physid_mask_t apicid_to_cpu_present(int apicid)
124{
125 return physid_mask_of_physid(0);
126}
127
128static inline void setup_portio_remap(void)
129{
130}
131
132static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
133{
134 return 1;
135}
136
137static inline void enable_apic_mode(void)
138{
139}
140
141static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
142{
143 int num_bits_set;
144 int cpus_found = 0;
145 int cpu;
146 int apicid;
147
148 num_bits_set = cpus_weight(*cpumask);
149 /* Return id to all */
150 if (num_bits_set >= nr_cpu_ids)
151 return (int) 0xFF;
152 /*
153 * The cpus in the mask must all be on the apic cluster. If are not
154 * on the same apicid cluster return default value of TARGET_CPUS.
155 */
156 cpu = first_cpu(*cpumask);
157 apicid = cpu_to_logical_apicid(cpu);
158 while (cpus_found < num_bits_set) {
159 if (cpu_isset(cpu, *cpumask)) {
160 int new_apicid = cpu_to_logical_apicid(cpu);
161 if (apicid_cluster(apicid) !=
162 apicid_cluster(new_apicid)){
163 printk ("%s: Not a valid mask!\n", __func__);
164 return 0xFF;
165 }
166 apicid = apicid | new_apicid;
167 cpus_found++;
168 }
169 cpu++;
170 }
171 return apicid;
172}
173
174static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
175 const struct cpumask *andmask)
176{
177 int apicid = cpu_to_logical_apicid(0);
178 cpumask_var_t cpumask;
179
180 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
181 return apicid;
182
183 cpumask_and(cpumask, inmask, andmask);
184 cpumask_and(cpumask, cpumask, cpu_online_mask);
185 apicid = cpu_mask_to_apicid(cpumask);
186
187 free_cpumask_var(cpumask);
188 return apicid;
189}
190
191/* cpuid returns the value latched in the HW at reset, not the APIC ID
192 * register's value. For any box whose BIOS changes APIC IDs, like
193 * clustered APIC systems, we must use hard_smp_processor_id.
194 *
195 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
196 */
197static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
198{
199 return hard_smp_processor_id() >> index_msb;
200}
201
202#endif /* __ASM_SUMMIT_APIC_H */
diff --git a/arch/x86/include/asm/summit/apicdef.h b/arch/x86/include/asm/summit/apicdef.h
deleted file mode 100644
index f3fbca1f61c1..000000000000
--- a/arch/x86/include/asm/summit/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_SUMMIT_APICDEF_H
2#define __ASM_SUMMIT_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (x>>24)&0xFF;
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h
deleted file mode 100644
index a8a2c24f50cc..000000000000
--- a/arch/x86/include/asm/summit/ipi.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef __ASM_SUMMIT_IPI_H
2#define __ASM_SUMMIT_IPI_H
3
4void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
5void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
6
7static inline void send_IPI_mask(const cpumask_t *mask, int vector)
8{
9 send_IPI_mask_sequence(mask, vector);
10}
11
12static inline void send_IPI_allbutself(int vector)
13{
14 cpumask_t mask = cpu_online_map;
15 cpu_clear(smp_processor_id(), mask);
16
17 if (!cpus_empty(mask))
18 send_IPI_mask(&mask, vector);
19}
20
21static inline void send_IPI_all(int vector)
22{
23 send_IPI_mask(&cpu_online_map, vector);
24}
25
26#endif /* __ASM_SUMMIT_IPI_H */
diff --git a/arch/x86/include/asm/summit/mpparse.h b/arch/x86/include/asm/summit/mpparse.h
deleted file mode 100644
index 380e86c02363..000000000000
--- a/arch/x86/include/asm/summit/mpparse.h
+++ /dev/null
@@ -1,109 +0,0 @@
1#ifndef __ASM_SUMMIT_MPPARSE_H
2#define __ASM_SUMMIT_MPPARSE_H
3
4#include <asm/tsc.h>
5
6extern int use_cyclone;
7
8#ifdef CONFIG_X86_SUMMIT_NUMA
9extern void setup_summit(void);
10#else
11#define setup_summit() {}
12#endif
13
14static inline int mps_oem_check(struct mpc_table *mpc, char *oem,
15 char *productid)
16{
17 if (!strncmp(oem, "IBM ENSW", 8) &&
18 (!strncmp(productid, "VIGIL SMP", 9)
19 || !strncmp(productid, "EXA", 3)
20 || !strncmp(productid, "RUTHLESS SMP", 12))){
21 mark_tsc_unstable("Summit based system");
22 use_cyclone = 1; /*enable cyclone-timer*/
23 setup_summit();
24 return 1;
25 }
26 return 0;
27}
28
29/* Hook from generic ACPI tables.c */
30static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
31{
32 if (!strncmp(oem_id, "IBM", 3) &&
33 (!strncmp(oem_table_id, "SERVIGIL", 8)
34 || !strncmp(oem_table_id, "EXA", 3))){
35 mark_tsc_unstable("Summit based system");
36 use_cyclone = 1; /*enable cyclone-timer*/
37 setup_summit();
38 return 1;
39 }
40 return 0;
41}
42
43struct rio_table_hdr {
44 unsigned char version; /* Version number of this data structure */
45 /* Version 3 adds chassis_num & WP_index */
46 unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */
47 unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */
48} __attribute__((packed));
49
50struct scal_detail {
51 unsigned char node_id; /* Scalability Node ID */
52 unsigned long CBAR; /* Address of 1MB register space */
53 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
54 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
55 unsigned char port1node; /* Node ID port connected to: 0xFF = None */
56 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
57 unsigned char port2node; /* Node ID port connected to: 0xFF = None */
58 unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */
59 unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */
60} __attribute__((packed));
61
62struct rio_detail {
63 unsigned char node_id; /* RIO Node ID */
64 unsigned long BBAR; /* Address of 1MB register space */
65 unsigned char type; /* Type of device */
66 unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/
67 /* For CYC: Node ID of Twister that owns this CYC */
68 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
69 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
70 unsigned char port1node; /* Node ID port connected to: 0xFF=None */
71 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
72 unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */
73 /* For CYC: 0 */
74 unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */
75 /* = 0 : the XAPIC is not used, ie:*/
76 /* ints fwded to another XAPIC */
77 /* Bits1:7 Reserved */
78 /* For CYC: Bits0:7 Reserved */
79 unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */
80 /* lower slot numbers/PCI bus numbers */
81 /* For CYC: No meaning */
82 unsigned char chassis_num; /* 1 based Chassis number */
83 /* For LookOut WPEGs this field indicates the */
84 /* Expansion Chassis #, enumerated from Boot */
85 /* Node WPEG external port, then Boot Node CYC */
86 /* external port, then Next Vigil chassis WPEG */
87 /* external port, etc. */
88 /* Shared Lookouts have only 1 chassis number (the */
89 /* first one assigned) */
90} __attribute__((packed));
91
92
93typedef enum {
94 CompatTwister = 0, /* Compatibility Twister */
95 AltTwister = 1, /* Alternate Twister of internal 8-way */
96 CompatCyclone = 2, /* Compatibility Cyclone */
97 AltCyclone = 3, /* Alternate Cyclone of internal 8-way */
98 CompatWPEG = 4, /* Compatibility WPEG */
99 AltWPEG = 5, /* Second Planar WPEG */
100 LookOutAWPEG = 6, /* LookOut WPEG */
101 LookOutBWPEG = 7, /* LookOut WPEG */
102} node_type;
103
104static inline int is_WPEG(struct rio_detail *rio){
105 return (rio->type == CompatWPEG || rio->type == AltWPEG ||
106 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
107}
108
109#endif /* __ASM_SUMMIT_MPPARSE_H */
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 8e626ea33a1a..c22383743f36 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -86,27 +86,44 @@ do { \
86 , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ 86 , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
87 "r12", "r13", "r14", "r15" 87 "r12", "r13", "r14", "r15"
88 88
89#ifdef CONFIG_CC_STACKPROTECTOR
90#define __switch_canary \
91 "movq %P[task_canary](%%rsi),%%r8\n\t" \
92 "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
93#define __switch_canary_oparam \
94 , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
95#define __switch_canary_iparam \
96 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
97#else /* CC_STACKPROTECTOR */
98#define __switch_canary
99#define __switch_canary_oparam
100#define __switch_canary_iparam
101#endif /* CC_STACKPROTECTOR */
102
89/* Save restore flags to clear handle leaking NT */ 103/* Save restore flags to clear handle leaking NT */
90#define switch_to(prev, next, last) \ 104#define switch_to(prev, next, last) \
91 asm volatile(SAVE_CONTEXT \ 105 asm volatile(SAVE_CONTEXT \
92 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ 106 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
93 "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ 107 "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
94 "call __switch_to\n\t" \ 108 "call __switch_to\n\t" \
95 ".globl thread_return\n" \ 109 ".globl thread_return\n" \
96 "thread_return:\n\t" \ 110 "thread_return:\n\t" \
97 "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ 111 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
112 __switch_canary \
98 "movq %P[thread_info](%%rsi),%%r8\n\t" \ 113 "movq %P[thread_info](%%rsi),%%r8\n\t" \
99 LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
100 "movq %%rax,%%rdi\n\t" \ 114 "movq %%rax,%%rdi\n\t" \
101 "jc ret_from_fork\n\t" \ 115 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
116 "jnz ret_from_fork\n\t" \
102 RESTORE_CONTEXT \ 117 RESTORE_CONTEXT \
103 : "=a" (last) \ 118 : "=a" (last) \
119 __switch_canary_oparam \
104 : [next] "S" (next), [prev] "D" (prev), \ 120 : [next] "S" (next), [prev] "D" (prev), \
105 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ 121 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
106 [ti_flags] "i" (offsetof(struct thread_info, flags)), \ 122 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
107 [tif_fork] "i" (TIF_FORK), \ 123 [_tif_fork] "i" (_TIF_FORK), \
108 [thread_info] "i" (offsetof(struct task_struct, stack)), \ 124 [thread_info] "i" (offsetof(struct task_struct, stack)), \
109 [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \ 125 [current_task] "m" (per_cpu_var(current_task)) \
126 __switch_canary_iparam \
110 : "memory", "cc" __EXTRA_CLOBBER) 127 : "memory", "cc" __EXTRA_CLOBBER)
111#endif 128#endif
112 129
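Note on the system.h hunk above: in the reworked switch_to(), %rsi is reloaded from the per-cpu current_task variable (which __switch_to has just updated) instead of pda.pcurrent, and with CONFIG_CC_STACKPROTECTOR the __switch_canary fragment copies the incoming task's canary into the per-cpu irq_stack_union.stack_canary slot, so later stack-protector checks in that task compare against its own value. Roughly, the asm fragment does the equivalent of this C sketch (illustrative, not patch code):

/* pseudo-C for the __switch_canary piece of switch_to() */
static inline void switch_stack_canary(struct task_struct *next)
{
	/* movq next->stack_canary,%r8 ; movq %r8,%gs:irq_stack_union.stack_canary */
	percpu_write(irq_stack_union.stack_canary, next->stack_canary);
}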
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 98789647baa9..df9d5f78385e 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -40,6 +40,7 @@ struct thread_info {
40 */ 40 */
41 __u8 supervisor_stack[0]; 41 __u8 supervisor_stack[0];
42#endif 42#endif
43 int uaccess_err;
43}; 44};
44 45
45#define INIT_THREAD_INFO(tsk) \ 46#define INIT_THREAD_INFO(tsk) \
@@ -194,25 +195,21 @@ static inline struct thread_info *current_thread_info(void)
194 195
195#else /* X86_32 */ 196#else /* X86_32 */
196 197
197#include <asm/pda.h> 198#include <asm/percpu.h>
199#define KERNEL_STACK_OFFSET (5*8)
198 200
199/* 201/*
200 * macros/functions for gaining access to the thread information structure 202 * macros/functions for gaining access to the thread information structure
201 * preempt_count needs to be 1 initially, until the scheduler is functional. 203 * preempt_count needs to be 1 initially, until the scheduler is functional.
202 */ 204 */
203#ifndef __ASSEMBLY__ 205#ifndef __ASSEMBLY__
204static inline struct thread_info *current_thread_info(void) 206DECLARE_PER_CPU(unsigned long, kernel_stack);
205{
206 struct thread_info *ti;
207 ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
208 return ti;
209}
210 207
211/* do not use in interrupt context */ 208static inline struct thread_info *current_thread_info(void)
212static inline struct thread_info *stack_thread_info(void)
213{ 209{
214 struct thread_info *ti; 210 struct thread_info *ti;
215 asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1))); 211 ti = (void *)(percpu_read(kernel_stack) +
212 KERNEL_STACK_OFFSET - THREAD_SIZE);
216 return ti; 213 return ti;
217} 214}
218 215
@@ -220,8 +217,8 @@ static inline struct thread_info *stack_thread_info(void)
220 217
221/* how to get the thread information struct from ASM */ 218/* how to get the thread information struct from ASM */
222#define GET_THREAD_INFO(reg) \ 219#define GET_THREAD_INFO(reg) \
223 movq %gs:pda_kernelstack,reg ; \ 220 movq PER_CPU_VAR(kernel_stack),reg ; \
224 subq $(THREAD_SIZE-PDA_STACKOFFSET),reg 221 subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
225 222
226#endif 223#endif
227 224
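Note on the thread_info.h hunk above: on 64-bit, current_thread_info() no longer goes through pda.kernelstack; the per-cpu kernel_stack variable holds a pointer KERNEL_STACK_OFFSET (5*8 bytes) below the top of the current kernel stack, and stepping back THREAD_SIZE from the true top reaches the thread_info at the stack's base. A worked example with a made-up address (THREAD_SIZE assumed to be 8 KB here):

#include <stdio.h>

int main(void)
{
	/* illustrative arithmetic only; the address and sizes are assumptions */
	unsigned long stack_top    = 0xffff880012346000UL;
	unsigned long kernel_stack = stack_top - 5 * 8;	/* value kept in the per-cpu variable */
	unsigned long thread_size  = 8192;		/* assumed THREAD_SIZE                */
	unsigned long ti           = kernel_stack + 5 * 8 - thread_size;

	printf("thread_info at %#lx\n", ti);		/* 0xffff880012344000, stack base */
	return 0;
}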
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 0e7bbb549116..d3539f998f88 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -113,7 +113,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
113 __flush_tlb(); 113 __flush_tlb();
114} 114}
115 115
116static inline void native_flush_tlb_others(const cpumask_t *cpumask, 116static inline void native_flush_tlb_others(const struct cpumask *cpumask,
117 struct mm_struct *mm, 117 struct mm_struct *mm,
118 unsigned long va) 118 unsigned long va)
119{ 119{
@@ -142,31 +142,28 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
142 flush_tlb_mm(vma->vm_mm); 142 flush_tlb_mm(vma->vm_mm);
143} 143}
144 144
145void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm, 145void native_flush_tlb_others(const struct cpumask *cpumask,
146 unsigned long va); 146 struct mm_struct *mm, unsigned long va);
147 147
148#define TLBSTATE_OK 1 148#define TLBSTATE_OK 1
149#define TLBSTATE_LAZY 2 149#define TLBSTATE_LAZY 2
150 150
151#ifdef CONFIG_X86_32
152struct tlb_state { 151struct tlb_state {
153 struct mm_struct *active_mm; 152 struct mm_struct *active_mm;
154 int state; 153 int state;
155 char __cacheline_padding[L1_CACHE_BYTES-8];
156}; 154};
157DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); 155DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
158 156
159void reset_lazy_tlbstate(void);
160#else
161static inline void reset_lazy_tlbstate(void) 157static inline void reset_lazy_tlbstate(void)
162{ 158{
159 percpu_write(cpu_tlbstate.state, 0);
160 percpu_write(cpu_tlbstate.active_mm, &init_mm);
163} 161}
164#endif
165 162
166#endif /* SMP */ 163#endif /* SMP */
167 164
168#ifndef CONFIG_PARAVIRT 165#ifndef CONFIG_PARAVIRT
169#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va) 166#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va)
170#endif 167#endif
171 168
172static inline void flush_tlb_kernel_range(unsigned long start, 169static inline void flush_tlb_kernel_range(unsigned long start,
@@ -175,4 +172,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
175 flush_tlb_all(); 172 flush_tlb_all();
176} 173}
177 174
175extern void zap_low_mappings(void);
176
178#endif /* _ASM_X86_TLBFLUSH_H */ 177#endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 4e2f2e0aab27..77cfb2cfb386 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -74,6 +74,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
74 return &node_to_cpumask_map[node]; 74 return &node_to_cpumask_map[node];
75} 75}
76 76
77static inline void setup_node_to_cpumask_map(void) { }
78
77#else /* CONFIG_X86_64 */ 79#else /* CONFIG_X86_64 */
78 80
79/* Mappings between node number and cpus on that node. */ 81/* Mappings between node number and cpus on that node. */
@@ -83,7 +85,8 @@ extern cpumask_t *node_to_cpumask_map;
83DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); 85DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
84 86
85/* Returns the number of the current Node. */ 87/* Returns the number of the current Node. */
86#define numa_node_id() read_pda(nodenumber) 88DECLARE_PER_CPU(int, node_number);
89#define numa_node_id() percpu_read(node_number)
87 90
88#ifdef CONFIG_DEBUG_PER_CPU_MAPS 91#ifdef CONFIG_DEBUG_PER_CPU_MAPS
89extern int cpu_to_node(int cpu); 92extern int cpu_to_node(int cpu);
@@ -102,10 +105,7 @@ static inline int cpu_to_node(int cpu)
102/* Same function but used if called before per_cpu areas are setup */ 105/* Same function but used if called before per_cpu areas are setup */
103static inline int early_cpu_to_node(int cpu) 106static inline int early_cpu_to_node(int cpu)
104{ 107{
105 if (early_per_cpu_ptr(x86_cpu_to_node_map)) 108 return early_per_cpu(x86_cpu_to_node_map, cpu);
106 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
107
108 return per_cpu(x86_cpu_to_node_map, cpu);
109} 109}
110 110
111/* Returns a pointer to the cpumask of CPUs on Node 'node'. */ 111/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
@@ -122,6 +122,8 @@ static inline cpumask_t node_to_cpumask(int node)
122 122
123#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ 123#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
124 124
125extern void setup_node_to_cpumask_map(void);
126
125/* 127/*
126 * Replace default node_to_cpumask_ptr with optimized version 128 * Replace default node_to_cpumask_ptr with optimized version
127 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" 129 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
@@ -192,9 +194,20 @@ extern int __node_distance(int, int);
192 194
193#else /* !CONFIG_NUMA */ 195#else /* !CONFIG_NUMA */
194 196
195#define numa_node_id() 0 197static inline int numa_node_id(void)
196#define cpu_to_node(cpu) 0 198{
197#define early_cpu_to_node(cpu) 0 199 return 0;
200}
201
202static inline int cpu_to_node(int cpu)
203{
204 return 0;
205}
206
207static inline int early_cpu_to_node(int cpu)
208{
209 return 0;
210}
198 211
199static inline const cpumask_t *cpumask_of_node(int node) 212static inline const cpumask_t *cpumask_of_node(int node)
200{ 213{
@@ -209,6 +222,8 @@ static inline int node_to_first_cpu(int node)
209 return first_cpu(cpu_online_map); 222 return first_cpu(cpu_online_map);
210} 223}
211 224
225static inline void setup_node_to_cpumask_map(void) { }
226
212/* 227/*
213 * Replace default node_to_cpumask_ptr with optimized version 228 * Replace default node_to_cpumask_ptr with optimized version
214 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" 229 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
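Note on the topology.h hunk above: early_cpu_to_node() collapses to a single early_per_cpu() call because that macro (changed earlier in this diff) now picks a pointer (either into the early __initdata map or into the final per-cpu area) and dereferences it, so the expression is an lvalue and the "is the early pointer still set?" test no longer has to be open-coded at every caller. A loose stand-alone sketch of the same trick, with plain arrays standing in for the kernel objects (all names here are made up):

static int node_map_early[8];		/* plays the early __initdata map       */
static int node_map_final[8];		/* plays per_cpu(x86_cpu_to_node_map)   */
static int *early_ptr = node_map_early;	/* cleared once per-cpu areas exist     */

#define early_node_of(cpu) \
	(*(early_ptr ? &early_ptr[(cpu)] : &node_map_final[(cpu)]))

/* Because it expands to a dereferenced pointer it reads and writes alike:
 *	early_node_of(3) = 1;
 * works both before and after early_ptr is set to NULL. */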
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index 780ba0ab94f9..90f06c25221d 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -13,6 +13,7 @@ extern unsigned char *trampoline_base;
13 13
14extern unsigned long init_rsp; 14extern unsigned long init_rsp;
15extern unsigned long initial_code; 15extern unsigned long initial_code;
16extern unsigned long initial_gs;
16 17
17#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) 18#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
18#define TRAMPOLINE_BASE 0x6000 19#define TRAMPOLINE_BASE 0x6000
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 4340055b7559..0ec6de4bcb0b 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -121,7 +121,7 @@ extern int __get_user_bad(void);
121 121
122#define __get_user_x(size, ret, x, ptr) \ 122#define __get_user_x(size, ret, x, ptr) \
123 asm volatile("call __get_user_" #size \ 123 asm volatile("call __get_user_" #size \
124 : "=a" (ret),"=d" (x) \ 124 : "=a" (ret), "=d" (x) \
125 : "0" (ptr)) \ 125 : "0" (ptr)) \
126 126
127/* Careful: we have to cast the result to the type of the pointer 127/* Careful: we have to cast the result to the type of the pointer
@@ -181,12 +181,12 @@ extern int __get_user_bad(void);
181 181
182#define __put_user_x(size, x, ptr, __ret_pu) \ 182#define __put_user_x(size, x, ptr, __ret_pu) \
183 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ 183 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
184 :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") 184 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
185 185
186 186
187 187
188#ifdef CONFIG_X86_32 188#ifdef CONFIG_X86_32
189#define __put_user_u64(x, addr, err) \ 189#define __put_user_asm_u64(x, addr, err) \
190 asm volatile("1: movl %%eax,0(%2)\n" \ 190 asm volatile("1: movl %%eax,0(%2)\n" \
191 "2: movl %%edx,4(%2)\n" \ 191 "2: movl %%edx,4(%2)\n" \
192 "3:\n" \ 192 "3:\n" \
@@ -199,12 +199,22 @@ extern int __get_user_bad(void);
199 : "=r" (err) \ 199 : "=r" (err) \
200 : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err)) 200 : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
201 201
202#define __put_user_asm_ex_u64(x, addr) \
203 asm volatile("1: movl %%eax,0(%1)\n" \
204 "2: movl %%edx,4(%1)\n" \
205 "3:\n" \
206 _ASM_EXTABLE(1b, 2b - 1b) \
207 _ASM_EXTABLE(2b, 3b - 2b) \
208 : : "A" (x), "r" (addr))
209
202#define __put_user_x8(x, ptr, __ret_pu) \ 210#define __put_user_x8(x, ptr, __ret_pu) \
203 asm volatile("call __put_user_8" : "=a" (__ret_pu) \ 211 asm volatile("call __put_user_8" : "=a" (__ret_pu) \
204 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") 212 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
205#else 213#else
206#define __put_user_u64(x, ptr, retval) \ 214#define __put_user_asm_u64(x, ptr, retval) \
207 __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT) 215 __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
216#define __put_user_asm_ex_u64(x, addr) \
217 __put_user_asm_ex(x, addr, "q", "", "Zr")
208#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) 218#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
209#endif 219#endif
210 220
@@ -276,10 +286,31 @@ do { \
276 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \ 286 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
277 break; \ 287 break; \
278 case 4: \ 288 case 4: \
279 __put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\ 289 __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
280 break; \ 290 break; \
281 case 8: \ 291 case 8: \
282 __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \ 292 __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval); \
293 break; \
294 default: \
295 __put_user_bad(); \
296 } \
297} while (0)
298
299#define __put_user_size_ex(x, ptr, size) \
300do { \
301 __chk_user_ptr(ptr); \
302 switch (size) { \
303 case 1: \
304 __put_user_asm_ex(x, ptr, "b", "b", "iq"); \
305 break; \
306 case 2: \
307 __put_user_asm_ex(x, ptr, "w", "w", "ir"); \
308 break; \
309 case 4: \
310 __put_user_asm_ex(x, ptr, "l", "k", "ir"); \
311 break; \
312 case 8: \
313 __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
283 break; \ 314 break; \
284 default: \ 315 default: \
285 __put_user_bad(); \ 316 __put_user_bad(); \
@@ -311,9 +342,12 @@ do { \
311 342
312#ifdef CONFIG_X86_32 343#ifdef CONFIG_X86_32
313#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad() 344#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
345#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
314#else 346#else
315#define __get_user_asm_u64(x, ptr, retval, errret) \ 347#define __get_user_asm_u64(x, ptr, retval, errret) \
316 __get_user_asm(x, ptr, retval, "q", "", "=r", errret) 348 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
349#define __get_user_asm_ex_u64(x, ptr) \
350 __get_user_asm_ex(x, ptr, "q", "", "=r")
317#endif 351#endif
318 352
319#define __get_user_size(x, ptr, size, retval, errret) \ 353#define __get_user_size(x, ptr, size, retval, errret) \
@@ -350,6 +384,33 @@ do { \
350 : "=r" (err), ltype(x) \ 384 : "=r" (err), ltype(x) \
351 : "m" (__m(addr)), "i" (errret), "0" (err)) 385 : "m" (__m(addr)), "i" (errret), "0" (err))
352 386
387#define __get_user_size_ex(x, ptr, size) \
388do { \
389 __chk_user_ptr(ptr); \
390 switch (size) { \
391 case 1: \
392 __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
393 break; \
394 case 2: \
395 __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
396 break; \
397 case 4: \
398 __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
399 break; \
400 case 8: \
401 __get_user_asm_ex_u64(x, ptr); \
402 break; \
403 default: \
404 (x) = __get_user_bad(); \
405 } \
406} while (0)
407
408#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
409 asm volatile("1: mov"itype" %1,%"rtype"0\n" \
410 "2:\n" \
411 _ASM_EXTABLE(1b, 2b - 1b) \
412 : ltype(x) : "m" (__m(addr)))
413
353#define __put_user_nocheck(x, ptr, size) \ 414#define __put_user_nocheck(x, ptr, size) \
354({ \ 415({ \
355 int __pu_err; \ 416 int __pu_err; \
@@ -385,6 +446,26 @@ struct __large_struct { unsigned long buf[100]; };
385 _ASM_EXTABLE(1b, 3b) \ 446 _ASM_EXTABLE(1b, 3b) \
386 : "=r"(err) \ 447 : "=r"(err) \
387 : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) 448 : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
449
450#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
451 asm volatile("1: mov"itype" %"rtype"0,%1\n" \
452 "2:\n" \
453 _ASM_EXTABLE(1b, 2b - 1b) \
454 : : ltype(x), "m" (__m(addr)))
455
456/*
457 * uaccess_try and catch
458 */
459#define uaccess_try do { \
460 int prev_err = current_thread_info()->uaccess_err; \
461 current_thread_info()->uaccess_err = 0; \
462 barrier();
463
464#define uaccess_catch(err) \
465 (err) |= current_thread_info()->uaccess_err; \
466 current_thread_info()->uaccess_err = prev_err; \
467} while (0)
468
388/** 469/**
389 * __get_user: - Get a simple variable from user space, with less checking. 470 * __get_user: - Get a simple variable from user space, with less checking.
390 * @x: Variable to store result. 471 * @x: Variable to store result.
@@ -408,6 +489,7 @@ struct __large_struct { unsigned long buf[100]; };
408 489
409#define __get_user(x, ptr) \ 490#define __get_user(x, ptr) \
410 __get_user_nocheck((x), (ptr), sizeof(*(ptr))) 491 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
492
411/** 493/**
412 * __put_user: - Write a simple value into user space, with less checking. 494 * __put_user: - Write a simple value into user space, with less checking.
413 * @x: Value to copy to user space. 495 * @x: Value to copy to user space.
@@ -435,6 +517,27 @@ struct __large_struct { unsigned long buf[100]; };
435#define __put_user_unaligned __put_user 517#define __put_user_unaligned __put_user
436 518
437/* 519/*
520 * {get|put}_user_try and catch
521 *
522 * get_user_try {
523 * get_user_ex(...);
524 * } get_user_catch(err)
525 */
526#define get_user_try uaccess_try
527#define get_user_catch(err) uaccess_catch(err)
528#define put_user_try uaccess_try
529#define put_user_catch(err) uaccess_catch(err)
530
531#define get_user_ex(x, ptr) do { \
532 unsigned long __gue_val; \
533 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
534 (x) = (__force __typeof__(*(ptr)))__gue_val; \
535} while (0)
536
537#define put_user_ex(x, ptr) \
538 __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
539
540/*
438 * movsl can be slow when source and dest are not both 8-byte aligned 541 * movsl can be slow when source and dest are not both 8-byte aligned
439 */ 542 */
440#ifdef CONFIG_X86_INTEL_USERCOPY 543#ifdef CONFIG_X86_INTEL_USERCOPY
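Note on the uaccess.h hunks above: the new uaccess_try/uaccess_catch pair lets a run of user-space accesses share one error flag in thread_info->uaccess_err rather than checking a return value after every __get_user()/__put_user(); get_user_ex()/put_user_ex() are the per-field accessors meant for use inside such a region, and signal-frame setup paths are the intended consumers. A minimal usage sketch, assuming a hypothetical user-visible structure (this is not code from the patch):

#include <linux/uaccess.h>

/* made-up structure and function, for illustration only */
struct user_report {
	int	status;
	long	value;
};

static int fill_user_report(struct user_report __user *u, int status, long value)
{
	int err = 0;

	put_user_try {
		put_user_ex(status, &u->status);
		put_user_ex(value, &u->value);
	} put_user_catch(err);

	return err;		/* non-zero if any store faulted */
}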
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
new file mode 100644
index 000000000000..8ac1d7e312f3
--- /dev/null
+++ b/arch/x86/include/asm/uv/uv.h
@@ -0,0 +1,33 @@
1#ifndef _ASM_X86_UV_UV_H
2#define _ASM_X86_UV_UV_H
3
4enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
5
6#ifdef CONFIG_X86_UV
7
8extern enum uv_system_type get_uv_system_type(void);
9extern int is_uv_system(void);
10extern void uv_cpu_init(void);
11extern void uv_system_init(void);
12extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
13extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
14 struct mm_struct *mm,
15 unsigned long va,
16 unsigned int cpu);
17
18#else /* X86_UV */
19
20static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
21static inline int is_uv_system(void) { return 0; }
22static inline void uv_cpu_init(void) { }
23static inline void uv_system_init(void) { }
24static inline int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
25{ return 1; }
26static inline const struct cpumask *
27uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
28 unsigned long va, unsigned int cpu)
29{ return cpumask; }
30
31#endif /* X86_UV */
32
33#endif /* _ASM_X86_UV_UV_H */
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 50423c7b56b2..9b0e61bf7a88 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -325,7 +325,6 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
325#define cpubit_isset(cpu, bau_local_cpumask) \ 325#define cpubit_isset(cpu, bau_local_cpumask) \
326 test_bit((cpu), (bau_local_cpumask).bits) 326 test_bit((cpu), (bau_local_cpumask).bits)
327 327
328extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
329extern void uv_bau_message_intr1(void); 328extern void uv_bau_message_intr1(void);
330extern void uv_bau_timeout_intr1(void); 329extern void uv_bau_timeout_intr1(void);
331 330
diff --git a/arch/x86/include/asm/voyager.h b/arch/x86/include/asm/voyager.h
index b3e647307625..c1635d43616f 100644
--- a/arch/x86/include/asm/voyager.h
+++ b/arch/x86/include/asm/voyager.h
@@ -527,3 +527,45 @@ extern void voyager_smp_intr_init(void);
527#define VOYAGER_PSI_SUBREAD 2 527#define VOYAGER_PSI_SUBREAD 2
528#define VOYAGER_PSI_SUBWRITE 3 528#define VOYAGER_PSI_SUBWRITE 3
529extern void voyager_cat_psi(__u8, __u16, __u8 *); 529extern void voyager_cat_psi(__u8, __u16, __u8 *);
530
531/* These define the CPIs we use in linux */
532#define VIC_CPI_LEVEL0 0
533#define VIC_CPI_LEVEL1 1
534/* now the fake CPIs */
535#define VIC_TIMER_CPI 2
536#define VIC_INVALIDATE_CPI 3
537#define VIC_RESCHEDULE_CPI 4
538#define VIC_ENABLE_IRQ_CPI 5
539#define VIC_CALL_FUNCTION_CPI 6
540#define VIC_CALL_FUNCTION_SINGLE_CPI 7
541
542/* Now the QIC CPIs: Since we don't need the two initial levels,
543 * these are 2 less than the VIC CPIs */
544#define QIC_CPI_OFFSET 1
545#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
546#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
547#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
548#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
549#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
550#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
551
552#define VIC_START_FAKE_CPI VIC_TIMER_CPI
553#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
554
555/* this is the SYS_INT CPI. */
556#define VIC_SYS_INT 8
557#define VIC_CMN_INT 15
558
559/* This is the boot CPI for alternate processors. It gets overwritten
560 * by the above once the system has activated all available processors */
561#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
562#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
563
564extern asmlinkage void vic_cpi_interrupt(void);
565extern asmlinkage void vic_sys_interrupt(void);
566extern asmlinkage void vic_cmn_interrupt(void);
567extern asmlinkage void qic_timer_interrupt(void);
568extern asmlinkage void qic_invalidate_interrupt(void);
569extern asmlinkage void qic_reschedule_interrupt(void);
570extern asmlinkage void qic_enable_irq_interrupt(void);
571extern asmlinkage void qic_call_function_interrupt(void);
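Since QIC_CPI_OFFSET is 1, each QIC CPI number is simply the matching VIC CPI minus one; expanding the defines added above gives the concrete vector numbers:

    /* Worked expansion of the defines above (no new code):
     *   QIC_TIMER_CPI                = VIC_TIMER_CPI                - 1 = 2 - 1 = 1
     *   QIC_INVALIDATE_CPI           = VIC_INVALIDATE_CPI           - 1 = 3 - 1 = 2
     *   QIC_RESCHEDULE_CPI           = VIC_RESCHEDULE_CPI           - 1 = 4 - 1 = 3
     *   QIC_ENABLE_IRQ_CPI           = VIC_ENABLE_IRQ_CPI           - 1 = 5 - 1 = 4
     *   QIC_CALL_FUNCTION_CPI        = VIC_CALL_FUNCTION_CPI        - 1 = 6 - 1 = 5
     *   QIC_CALL_FUNCTION_SINGLE_CPI = VIC_CALL_FUNCTION_SINGLE_CPI - 1 = 7 - 1 = 6
     */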
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index 19144184983a..1df35417c412 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -15,10 +15,4 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
15 return raw_irqs_disabled_flags(regs->flags); 15 return raw_irqs_disabled_flags(regs->flags);
16} 16}
17 17
18static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
19{
20 regs->orig_ax = ~irq;
21 do_IRQ(regs);
22}
23
24#endif /* _ASM_X86_XEN_EVENTS_H */ 18#endif /* _ASM_X86_XEN_EVENTS_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index d364df03c1d6..24f357e7557a 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -23,13 +23,14 @@ nostackp := $(call cc-option, -fno-stack-protector)
23CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp) 23CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp)
24CFLAGS_hpet.o := $(nostackp) 24CFLAGS_hpet.o := $(nostackp)
25CFLAGS_tsc.o := $(nostackp) 25CFLAGS_tsc.o := $(nostackp)
26CFLAGS_paravirt.o := $(nostackp)
26 27
27obj-y := process_$(BITS).o signal.o entry_$(BITS).o 28obj-y := process_$(BITS).o signal.o entry_$(BITS).o
28obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o 29obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
29obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o 30obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o
30obj-y += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o 31obj-y += setup.o i8259.o irqinit_$(BITS).o
31obj-$(CONFIG_X86_VISWS) += visws_quirks.o 32obj-$(CONFIG_X86_VISWS) += visws_quirks.o
32obj-$(CONFIG_X86_32) += probe_roms_32.o 33obj-$(CONFIG_X86_32) += probe_32.o probe_roms_32.o
33obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o 34obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
34obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o 35obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
35obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o 36obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o
@@ -49,20 +50,20 @@ obj-y += step.o
49obj-$(CONFIG_STACKTRACE) += stacktrace.o 50obj-$(CONFIG_STACKTRACE) += stacktrace.o
50obj-y += cpu/ 51obj-y += cpu/
51obj-y += acpi/ 52obj-y += acpi/
52obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o 53obj-y += reboot.o
53obj-$(CONFIG_MCA) += mca_32.o 54obj-$(CONFIG_MCA) += mca_32.o
54obj-$(CONFIG_X86_MSR) += msr.o 55obj-$(CONFIG_X86_MSR) += msr.o
55obj-$(CONFIG_X86_CPUID) += cpuid.o 56obj-$(CONFIG_X86_CPUID) += cpuid.o
56obj-$(CONFIG_PCI) += early-quirks.o 57obj-$(CONFIG_PCI) += early-quirks.o
57apm-y := apm_32.o 58apm-y := apm_32.o
58obj-$(CONFIG_APM) += apm.o 59obj-$(CONFIG_APM) += apm.o
59obj-$(CONFIG_X86_SMP) += smp.o 60obj-$(CONFIG_SMP) += smp.o
60obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o tlb_$(BITS).o 61obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o ipi.o
61obj-$(CONFIG_X86_32_SMP) += smpcommon.o 62obj-$(CONFIG_SMP) += setup_percpu.o
62obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o 63obj-$(CONFIG_X86_64_SMP) += tsc_sync.o
63obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o 64obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o
64obj-$(CONFIG_X86_MPPARSE) += mpparse.o 65obj-$(CONFIG_X86_MPPARSE) += mpparse.o
65obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o 66obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o ipi.o
66obj-$(CONFIG_X86_IO_APIC) += io_apic.o 67obj-$(CONFIG_X86_IO_APIC) += io_apic.o
67obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o 68obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
68obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 69obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
@@ -70,9 +71,10 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
70obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o 71obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
71obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o 72obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
72obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o 73obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
74obj-$(CONFIG_X86_BIGSMP) += bigsmp_32.o
73obj-$(CONFIG_X86_NUMAQ) += numaq_32.o 75obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
74obj-$(CONFIG_X86_ES7000) += es7000_32.o 76obj-$(CONFIG_X86_ES7000) += es7000_32.o
75obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o 77obj-$(CONFIG_X86_SUMMIT) += summit_32.o
76obj-y += vsmp_64.o 78obj-y += vsmp_64.o
77obj-$(CONFIG_KPROBES) += kprobes.o 79obj-$(CONFIG_KPROBES) += kprobes.o
78obj-$(CONFIG_MODULES) += module_$(BITS).o 80obj-$(CONFIG_MODULES) += module_$(BITS).o
@@ -114,10 +116,11 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64
114### 116###
115# 64 bit specific files 117# 64 bit specific files
116ifeq ($(CONFIG_X86_64),y) 118ifeq ($(CONFIG_X86_64),y)
117 obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o 119 obj-y += genapic_64.o genapic_flat_64.o
118 obj-y += bios_uv.o uv_irq.o uv_sysfs.o
119 obj-y += genx2apic_cluster.o 120 obj-y += genx2apic_cluster.o
120 obj-y += genx2apic_phys.o 121 obj-y += genx2apic_phys.o
122 obj-$(CONFIG_X86_UV) += genx2apic_uv_x.o tlb_uv.o
123 obj-$(CONFIG_X86_UV) += bios_uv.o uv_irq.o uv_sysfs.o
121 obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o 124 obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
122 obj-$(CONFIG_AUDIT) += audit_64.o 125 obj-$(CONFIG_AUDIT) += audit_64.o
123 126
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 7678f10c4568..bba162c81d5b 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -42,10 +42,6 @@
42#include <asm/mpspec.h> 42#include <asm/mpspec.h>
43#include <asm/smp.h> 43#include <asm/smp.h>
44 44
45#ifdef CONFIG_X86_LOCAL_APIC
46# include <mach_apic.h>
47#endif
48
49static int __initdata acpi_force = 0; 45static int __initdata acpi_force = 0;
50u32 acpi_rsdt_forced; 46u32 acpi_rsdt_forced;
51#ifdef CONFIG_ACPI 47#ifdef CONFIG_ACPI
@@ -56,16 +52,7 @@ int acpi_disabled = 1;
56EXPORT_SYMBOL(acpi_disabled); 52EXPORT_SYMBOL(acpi_disabled);
57 53
58#ifdef CONFIG_X86_64 54#ifdef CONFIG_X86_64
59 55# include <asm/proto.h>
60#include <asm/proto.h>
61
62#else /* X86 */
63
64#ifdef CONFIG_X86_LOCAL_APIC
65#include <mach_apic.h>
66#include <mach_mpparse.h>
67#endif /* CONFIG_X86_LOCAL_APIC */
68
69#endif /* X86 */ 56#endif /* X86 */
70 57
71#define BAD_MADT_ENTRY(entry, end) ( \ 58#define BAD_MADT_ENTRY(entry, end) ( \
@@ -239,7 +226,8 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
239 madt->address); 226 madt->address);
240 } 227 }
241 228
242 acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); 229 default_acpi_madt_oem_check(madt->header.oem_id,
230 madt->header.oem_table_id);
243 231
244 return 0; 232 return 0;
245} 233}
@@ -884,7 +872,7 @@ static struct {
884 DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); 872 DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
885} mp_ioapic_routing[MAX_IO_APICS]; 873} mp_ioapic_routing[MAX_IO_APICS];
886 874
887static int mp_find_ioapic(int gsi) 875int mp_find_ioapic(int gsi)
888{ 876{
889 int i = 0; 877 int i = 0;
890 878
@@ -899,6 +887,16 @@ static int mp_find_ioapic(int gsi)
899 return -1; 887 return -1;
900} 888}
901 889
890int mp_find_ioapic_pin(int ioapic, int gsi)
891{
892 if (WARN_ON(ioapic == -1))
893 return -1;
894 if (WARN_ON(gsi > mp_ioapic_routing[ioapic].gsi_end))
895 return -1;
896
897 return gsi - mp_ioapic_routing[ioapic].gsi_base;
898}
899
902static u8 __init uniq_ioapic_id(u8 id) 900static u8 __init uniq_ioapic_id(u8 id)
903{ 901{
904#ifdef CONFIG_X86_32 902#ifdef CONFIG_X86_32
@@ -912,8 +910,8 @@ static u8 __init uniq_ioapic_id(u8 id)
912 DECLARE_BITMAP(used, 256); 910 DECLARE_BITMAP(used, 256);
913 bitmap_zero(used, 256); 911 bitmap_zero(used, 256);
914 for (i = 0; i < nr_ioapics; i++) { 912 for (i = 0; i < nr_ioapics; i++) {
915 struct mp_config_ioapic *ia = &mp_ioapics[i]; 913 struct mpc_ioapic *ia = &mp_ioapics[i];
916 __set_bit(ia->mp_apicid, used); 914 __set_bit(ia->apicid, used);
917 } 915 }
918 if (!test_bit(id, used)) 916 if (!test_bit(id, used))
919 return id; 917 return id;
@@ -945,29 +943,29 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
945 943
946 idx = nr_ioapics; 944 idx = nr_ioapics;
947 945
948 mp_ioapics[idx].mp_type = MP_IOAPIC; 946 mp_ioapics[idx].type = MP_IOAPIC;
949 mp_ioapics[idx].mp_flags = MPC_APIC_USABLE; 947 mp_ioapics[idx].flags = MPC_APIC_USABLE;
950 mp_ioapics[idx].mp_apicaddr = address; 948 mp_ioapics[idx].apicaddr = address;
951 949
952 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 950 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
953 mp_ioapics[idx].mp_apicid = uniq_ioapic_id(id); 951 mp_ioapics[idx].apicid = uniq_ioapic_id(id);
954#ifdef CONFIG_X86_32 952#ifdef CONFIG_X86_32
955 mp_ioapics[idx].mp_apicver = io_apic_get_version(idx); 953 mp_ioapics[idx].apicver = io_apic_get_version(idx);
956#else 954#else
957 mp_ioapics[idx].mp_apicver = 0; 955 mp_ioapics[idx].apicver = 0;
958#endif 956#endif
959 /* 957 /*
960 * Build basic GSI lookup table to facilitate gsi->io_apic lookups 958 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
961 * and to prevent reprogramming of IOAPIC pins (PCI GSIs). 959 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
962 */ 960 */
963 mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mp_apicid; 961 mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid;
964 mp_ioapic_routing[idx].gsi_base = gsi_base; 962 mp_ioapic_routing[idx].gsi_base = gsi_base;
965 mp_ioapic_routing[idx].gsi_end = gsi_base + 963 mp_ioapic_routing[idx].gsi_end = gsi_base +
966 io_apic_get_redir_entries(idx); 964 io_apic_get_redir_entries(idx);
967 965
968 printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, " 966 printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
969 "GSI %d-%d\n", idx, mp_ioapics[idx].mp_apicid, 967 "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
970 mp_ioapics[idx].mp_apicver, mp_ioapics[idx].mp_apicaddr, 968 mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
971 mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end); 969 mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);
972 970
973 nr_ioapics++; 971 nr_ioapics++;
@@ -996,19 +994,19 @@ int __init acpi_probe_gsi(void)
996 return max_gsi + 1; 994 return max_gsi + 1;
997} 995}
998 996
999static void assign_to_mp_irq(struct mp_config_intsrc *m, 997static void assign_to_mp_irq(struct mpc_intsrc *m,
1000 struct mp_config_intsrc *mp_irq) 998 struct mpc_intsrc *mp_irq)
1001{ 999{
1002 memcpy(mp_irq, m, sizeof(struct mp_config_intsrc)); 1000 memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
1003} 1001}
1004 1002
1005static int mp_irq_cmp(struct mp_config_intsrc *mp_irq, 1003static int mp_irq_cmp(struct mpc_intsrc *mp_irq,
1006 struct mp_config_intsrc *m) 1004 struct mpc_intsrc *m)
1007{ 1005{
1008 return memcmp(mp_irq, m, sizeof(struct mp_config_intsrc)); 1006 return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
1009} 1007}
1010 1008
1011static void save_mp_irq(struct mp_config_intsrc *m) 1009static void save_mp_irq(struct mpc_intsrc *m)
1012{ 1010{
1013 int i; 1011 int i;
1014 1012
@@ -1026,7 +1024,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
1026{ 1024{
1027 int ioapic; 1025 int ioapic;
1028 int pin; 1026 int pin;
1029 struct mp_config_intsrc mp_irq; 1027 struct mpc_intsrc mp_irq;
1030 1028
1031 /* 1029 /*
1032 * Convert 'gsi' to 'ioapic.pin'. 1030 * Convert 'gsi' to 'ioapic.pin'.
@@ -1034,7 +1032,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
1034 ioapic = mp_find_ioapic(gsi); 1032 ioapic = mp_find_ioapic(gsi);
1035 if (ioapic < 0) 1033 if (ioapic < 0)
1036 return; 1034 return;
1037 pin = gsi - mp_ioapic_routing[ioapic].gsi_base; 1035 pin = mp_find_ioapic_pin(ioapic, gsi);
1038 1036
1039 /* 1037 /*
1040 * TBD: This check is for faulty timer entries, where the override 1038 * TBD: This check is for faulty timer entries, where the override
@@ -1044,13 +1042,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
1044 if ((bus_irq == 0) && (trigger == 3)) 1042 if ((bus_irq == 0) && (trigger == 3))
1045 trigger = 1; 1043 trigger = 1;
1046 1044
1047 mp_irq.mp_type = MP_INTSRC; 1045 mp_irq.type = MP_INTSRC;
1048 mp_irq.mp_irqtype = mp_INT; 1046 mp_irq.irqtype = mp_INT;
1049 mp_irq.mp_irqflag = (trigger << 2) | polarity; 1047 mp_irq.irqflag = (trigger << 2) | polarity;
1050 mp_irq.mp_srcbus = MP_ISA_BUS; 1048 mp_irq.srcbus = MP_ISA_BUS;
1051 mp_irq.mp_srcbusirq = bus_irq; /* IRQ */ 1049 mp_irq.srcbusirq = bus_irq; /* IRQ */
1052 mp_irq.mp_dstapic = mp_ioapics[ioapic].mp_apicid; /* APIC ID */ 1050 mp_irq.dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */
1053 mp_irq.mp_dstirq = pin; /* INTIN# */ 1051 mp_irq.dstirq = pin; /* INTIN# */
1054 1052
1055 save_mp_irq(&mp_irq); 1053 save_mp_irq(&mp_irq);
1056} 1054}
@@ -1060,7 +1058,7 @@ void __init mp_config_acpi_legacy_irqs(void)
1060 int i; 1058 int i;
1061 int ioapic; 1059 int ioapic;
1062 unsigned int dstapic; 1060 unsigned int dstapic;
1063 struct mp_config_intsrc mp_irq; 1061 struct mpc_intsrc mp_irq;
1064 1062
1065#if defined (CONFIG_MCA) || defined (CONFIG_EISA) 1063#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
1066 /* 1064 /*
@@ -1085,7 +1083,7 @@ void __init mp_config_acpi_legacy_irqs(void)
1085 ioapic = mp_find_ioapic(0); 1083 ioapic = mp_find_ioapic(0);
1086 if (ioapic < 0) 1084 if (ioapic < 0)
1087 return; 1085 return;
1088 dstapic = mp_ioapics[ioapic].mp_apicid; 1086 dstapic = mp_ioapics[ioapic].apicid;
1089 1087
1090 /* 1088 /*
1091 * Use the default configuration for the IRQs 0-15. Unless 1089 * Use the default configuration for the IRQs 0-15. Unless
@@ -1095,16 +1093,14 @@ void __init mp_config_acpi_legacy_irqs(void)
1095 int idx; 1093 int idx;
1096 1094
1097 for (idx = 0; idx < mp_irq_entries; idx++) { 1095 for (idx = 0; idx < mp_irq_entries; idx++) {
1098 struct mp_config_intsrc *irq = mp_irqs + idx; 1096 struct mpc_intsrc *irq = mp_irqs + idx;
1099 1097
1100 /* Do we already have a mapping for this ISA IRQ? */ 1098 /* Do we already have a mapping for this ISA IRQ? */
1101 if (irq->mp_srcbus == MP_ISA_BUS 1099 if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
1102 && irq->mp_srcbusirq == i)
1103 break; 1100 break;
1104 1101
1105 /* Do we already have a mapping for this IOAPIC pin */ 1102 /* Do we already have a mapping for this IOAPIC pin */
1106 if (irq->mp_dstapic == dstapic && 1103 if (irq->dstapic == dstapic && irq->dstirq == i)
1107 irq->mp_dstirq == i)
1108 break; 1104 break;
1109 } 1105 }
1110 1106
@@ -1113,13 +1109,13 @@ void __init mp_config_acpi_legacy_irqs(void)
1113 continue; /* IRQ already used */ 1109 continue; /* IRQ already used */
1114 } 1110 }
1115 1111
1116 mp_irq.mp_type = MP_INTSRC; 1112 mp_irq.type = MP_INTSRC;
1117 mp_irq.mp_irqflag = 0; /* Conforming */ 1113 mp_irq.irqflag = 0; /* Conforming */
1118 mp_irq.mp_srcbus = MP_ISA_BUS; 1114 mp_irq.srcbus = MP_ISA_BUS;
1119 mp_irq.mp_dstapic = dstapic; 1115 mp_irq.dstapic = dstapic;
1120 mp_irq.mp_irqtype = mp_INT; 1116 mp_irq.irqtype = mp_INT;
1121 mp_irq.mp_srcbusirq = i; /* Identity mapped */ 1117 mp_irq.srcbusirq = i; /* Identity mapped */
1122 mp_irq.mp_dstirq = i; 1118 mp_irq.dstirq = i;
1123 1119
1124 save_mp_irq(&mp_irq); 1120 save_mp_irq(&mp_irq);
1125 } 1121 }
@@ -1156,7 +1152,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
1156 return gsi; 1152 return gsi;
1157 } 1153 }
1158 1154
1159 ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base; 1155 ioapic_pin = mp_find_ioapic_pin(ioapic, gsi);
1160 1156
1161#ifdef CONFIG_X86_32 1157#ifdef CONFIG_X86_32
1162 if (ioapic_renumber_irq) 1158 if (ioapic_renumber_irq)
@@ -1230,22 +1226,22 @@ int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
1230 u32 gsi, int triggering, int polarity) 1226 u32 gsi, int triggering, int polarity)
1231{ 1227{
1232#ifdef CONFIG_X86_MPPARSE 1228#ifdef CONFIG_X86_MPPARSE
1233 struct mp_config_intsrc mp_irq; 1229 struct mpc_intsrc mp_irq;
1234 int ioapic; 1230 int ioapic;
1235 1231
1236 if (!acpi_ioapic) 1232 if (!acpi_ioapic)
1237 return 0; 1233 return 0;
1238 1234
1239 /* print the entry should happen on mptable identically */ 1235 /* print the entry should happen on mptable identically */
1240 mp_irq.mp_type = MP_INTSRC; 1236 mp_irq.type = MP_INTSRC;
1241 mp_irq.mp_irqtype = mp_INT; 1237 mp_irq.irqtype = mp_INT;
1242 mp_irq.mp_irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) | 1238 mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
1243 (polarity == ACPI_ACTIVE_HIGH ? 1 : 3); 1239 (polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
1244 mp_irq.mp_srcbus = number; 1240 mp_irq.srcbus = number;
1245 mp_irq.mp_srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3); 1241 mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
1246 ioapic = mp_find_ioapic(gsi); 1242 ioapic = mp_find_ioapic(gsi);
1247 mp_irq.mp_dstapic = mp_ioapic_routing[ioapic].apic_id; 1243 mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id;
1248 mp_irq.mp_dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base; 1244 mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
1249 1245
1250 save_mp_irq(&mp_irq); 1246 save_mp_irq(&mp_irq);
1251#endif 1247#endif
@@ -1372,7 +1368,7 @@ static void __init acpi_process_madt(void)
1372 if (!error) { 1368 if (!error) {
1373 acpi_lapic = 1; 1369 acpi_lapic = 1;
1374 1370
1375#ifdef CONFIG_X86_GENERICARCH 1371#ifdef CONFIG_X86_BIGSMP
1376 generic_bigsmp_probe(); 1372 generic_bigsmp_probe();
1377#endif 1373#endif
1378 /* 1374 /*
@@ -1384,9 +1380,8 @@ static void __init acpi_process_madt(void)
1384 acpi_ioapic = 1; 1380 acpi_ioapic = 1;
1385 1381
1386 smp_found_config = 1; 1382 smp_found_config = 1;
1387#ifdef CONFIG_X86_32 1383 if (apic->setup_apic_routing)
1388 setup_apic_routing(); 1384 apic->setup_apic_routing();
1389#endif
1390 } 1385 }
1391 } 1386 }
1392 if (error == -EINVAL) { 1387 if (error == -EINVAL) {
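The acpi/boot.c changes above replace the open-coded "gsi - gsi_base" arithmetic with the new, range-checked mp_find_ioapic_pin() helper. A hedged sketch of the two-step lookup as the hunks use it; the wrapper function itself is illustrative and not part of the patch:

    static int gsi_to_ioapic_pin(u32 gsi, int *pin)
    {
            int ioapic = mp_find_ioapic(gsi);       /* index into mp_ioapics[], or -1 */

            if (ioapic < 0)
                    return -1;
            /* mp_find_ioapic_pin() is gsi - gsi_base, with WARN_ON range checks */
            *pin = mp_find_ioapic_pin(ioapic, gsi);
            return ioapic;
    }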
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index a60c1f3bcb87..7c243a2c5115 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -101,6 +101,7 @@ int acpi_save_state_mem(void)
101 stack_start.sp = temp_stack + sizeof(temp_stack); 101 stack_start.sp = temp_stack + sizeof(temp_stack);
102 early_gdt_descr.address = 102 early_gdt_descr.address =
103 (unsigned long)get_cpu_gdt_table(smp_processor_id()); 103 (unsigned long)get_cpu_gdt_table(smp_processor_id());
104 initial_gs = per_cpu_offset(smp_processor_id());
104#endif 105#endif
105 initial_code = (unsigned long)wakeup_long64; 106 initial_code = (unsigned long)wakeup_long64;
106 saved_magic = 0x123456789abcdef0; 107 saved_magic = 0x123456789abcdef0;
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 115449f869ee..8bd801db24d9 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Local APIC handling, local APIC timers 2 * Local APIC handling, local APIC timers
3 * 3 *
4 * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com> 4 * (c) 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
5 * 5 *
6 * Fixes 6 * Fixes
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs; 7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
@@ -14,51 +14,71 @@
14 * Mikael Pettersson : PM converted to driver model. 14 * Mikael Pettersson : PM converted to driver model.
15 */ 15 */
16 16
17#include <linux/init.h>
18
19#include <linux/mm.h>
20#include <linux/delay.h>
21#include <linux/bootmem.h>
22#include <linux/interrupt.h>
23#include <linux/mc146818rtc.h>
24#include <linux/kernel_stat.h> 17#include <linux/kernel_stat.h>
25#include <linux/sysdev.h> 18#include <linux/mc146818rtc.h>
26#include <linux/ioport.h>
27#include <linux/cpu.h>
28#include <linux/clockchips.h>
29#include <linux/acpi_pmtmr.h> 19#include <linux/acpi_pmtmr.h>
20#include <linux/clockchips.h>
21#include <linux/interrupt.h>
22#include <linux/bootmem.h>
23#include <linux/ftrace.h>
24#include <linux/ioport.h>
30#include <linux/module.h> 25#include <linux/module.h>
31#include <linux/dmi.h> 26#include <linux/sysdev.h>
27#include <linux/delay.h>
28#include <linux/timex.h>
32#include <linux/dmar.h> 29#include <linux/dmar.h>
33#include <linux/ftrace.h> 30#include <linux/init.h>
34#include <linux/smp.h> 31#include <linux/cpu.h>
32#include <linux/dmi.h>
35#include <linux/nmi.h> 33#include <linux/nmi.h>
36#include <linux/timex.h> 34#include <linux/smp.h>
35#include <linux/mm.h>
37 36
38#include <asm/atomic.h>
39#include <asm/mtrr.h>
40#include <asm/mpspec.h>
41#include <asm/desc.h>
42#include <asm/arch_hooks.h> 37#include <asm/arch_hooks.h>
43#include <asm/hpet.h>
44#include <asm/pgalloc.h> 38#include <asm/pgalloc.h>
39#include <asm/genapic.h>
40#include <asm/atomic.h>
41#include <asm/mpspec.h>
45#include <asm/i8253.h> 42#include <asm/i8253.h>
46#include <asm/idle.h> 43#include <asm/i8259.h>
47#include <asm/proto.h> 44#include <asm/proto.h>
48#include <asm/apic.h> 45#include <asm/apic.h>
49#include <asm/i8259.h> 46#include <asm/desc.h>
47#include <asm/hpet.h>
48#include <asm/idle.h>
49#include <asm/mtrr.h>
50#include <asm/smp.h> 50#include <asm/smp.h>
51 51
52#include <mach_apic.h> 52unsigned int num_processors;
53#include <mach_apicdef.h> 53
54#include <mach_ipi.h> 54unsigned disabled_cpus __cpuinitdata;
55
56/* Processor that is doing the boot up */
57unsigned int boot_cpu_physical_apicid = -1U;
55 58
56/* 59/*
57 * Sanity check 60 * The highest APIC ID seen during enumeration.
61 *
62 * This determines the messaging protocol we can use: if all APIC IDs
63 * are in the 0 ... 7 range, then we can use logical addressing which
64 * has some performance advantages (better broadcasting).
65 *
66 * If there's an APIC ID of 8 or above, we use physical addressing.

58 */ 67 */
59#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F) 68unsigned int max_physical_apicid;
60# error SPURIOUS_APIC_VECTOR definition error 69
61#endif 70/*
71 * Bitmask of physically existing CPUs:
72 */
73physid_mask_t phys_cpu_present_map;
74
75/*
76 * Map cpu index to physical APIC ID
77 */
78DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
79DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
80EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
81EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
62 82
63#ifdef CONFIG_X86_32 83#ifdef CONFIG_X86_32
64/* 84/*
@@ -457,7 +477,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
457static void lapic_timer_broadcast(const struct cpumask *mask) 477static void lapic_timer_broadcast(const struct cpumask *mask)
458{ 478{
459#ifdef CONFIG_SMP 479#ifdef CONFIG_SMP
460 send_IPI_mask(mask, LOCAL_TIMER_VECTOR); 480 apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
461#endif 481#endif
462} 482}
463 483
@@ -991,11 +1011,11 @@ int __init verify_local_APIC(void)
991 */ 1011 */
992 reg0 = apic_read(APIC_ID); 1012 reg0 = apic_read(APIC_ID);
993 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0); 1013 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
994 apic_write(APIC_ID, reg0 ^ APIC_ID_MASK); 1014 apic_write(APIC_ID, reg0 ^ apic->apic_id_mask);
995 reg1 = apic_read(APIC_ID); 1015 reg1 = apic_read(APIC_ID);
996 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1); 1016 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
997 apic_write(APIC_ID, reg0); 1017 apic_write(APIC_ID, reg0);
998 if (reg1 != (reg0 ^ APIC_ID_MASK)) 1018 if (reg1 != (reg0 ^ apic->apic_id_mask))
999 return 0; 1019 return 0;
1000 1020
1001 /* 1021 /*
@@ -1089,7 +1109,7 @@ static void __cpuinit lapic_setup_esr(void)
1089 return; 1109 return;
1090 } 1110 }
1091 1111
1092 if (esr_disable) { 1112 if (apic->disable_esr) {
1093 /* 1113 /*
1094 * Something untraceable is creating bad interrupts on 1114 * Something untraceable is creating bad interrupts on
1095 * secondary quads ... for the moment, just leave the 1115 * secondary quads ... for the moment, just leave the
@@ -1130,9 +1150,14 @@ void __cpuinit setup_local_APIC(void)
1130 unsigned int value; 1150 unsigned int value;
1131 int i, j; 1151 int i, j;
1132 1152
1153 if (disable_apic) {
1154 arch_disable_smp_support();
1155 return;
1156 }
1157
1133#ifdef CONFIG_X86_32 1158#ifdef CONFIG_X86_32
1134 /* Pound the ESR really hard over the head with a big hammer - mbligh */ 1159 /* Pound the ESR really hard over the head with a big hammer - mbligh */
1135 if (lapic_is_integrated() && esr_disable) { 1160 if (lapic_is_integrated() && apic->disable_esr) {
1136 apic_write(APIC_ESR, 0); 1161 apic_write(APIC_ESR, 0);
1137 apic_write(APIC_ESR, 0); 1162 apic_write(APIC_ESR, 0);
1138 apic_write(APIC_ESR, 0); 1163 apic_write(APIC_ESR, 0);
@@ -1146,7 +1171,7 @@ void __cpuinit setup_local_APIC(void)
1146 * Double-check whether this APIC is really registered. 1171 * Double-check whether this APIC is really registered.
1147 * This is meaningless in clustered apic mode, so we skip it. 1172 * This is meaningless in clustered apic mode, so we skip it.
1148 */ 1173 */
1149 if (!apic_id_registered()) 1174 if (!apic->apic_id_registered())
1150 BUG(); 1175 BUG();
1151 1176
1152 /* 1177 /*
@@ -1154,7 +1179,7 @@ void __cpuinit setup_local_APIC(void)
1154 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel 1179 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
1155 * document number 292116). So here it goes... 1180 * document number 292116). So here it goes...
1156 */ 1181 */
1157 init_apic_ldr(); 1182 apic->init_apic_ldr();
1158 1183
1159 /* 1184 /*
1160 * Set Task Priority to 'accept all'. We never change this 1185 * Set Task Priority to 'accept all'. We never change this
@@ -1570,11 +1595,11 @@ int apic_version[MAX_APICS];
1570 1595
1571int __init APIC_init_uniprocessor(void) 1596int __init APIC_init_uniprocessor(void)
1572{ 1597{
1573#ifdef CONFIG_X86_64
1574 if (disable_apic) { 1598 if (disable_apic) {
1575 pr_info("Apic disabled\n"); 1599 pr_info("Apic disabled\n");
1576 return -1; 1600 return -1;
1577 } 1601 }
1602#ifdef CONFIG_X86_64
1578 if (!cpu_has_apic) { 1603 if (!cpu_has_apic) {
1579 disable_apic = 1; 1604 disable_apic = 1;
1580 pr_info("Apic disabled by BIOS\n"); 1605 pr_info("Apic disabled by BIOS\n");
@@ -1600,7 +1625,7 @@ int __init APIC_init_uniprocessor(void)
1600 enable_IR_x2apic(); 1625 enable_IR_x2apic();
1601#endif 1626#endif
1602#ifdef CONFIG_X86_64 1627#ifdef CONFIG_X86_64
1603 setup_apic_routing(); 1628 default_setup_apic_routing();
1604#endif 1629#endif
1605 1630
1606 verify_local_APIC(); 1631 verify_local_APIC();
@@ -1738,7 +1763,8 @@ void __init connect_bsp_APIC(void)
1738 outb(0x01, 0x23); 1763 outb(0x01, 0x23);
1739 } 1764 }
1740#endif 1765#endif
1741 enable_apic_mode(); 1766 if (apic->enable_apic_mode)
1767 apic->enable_apic_mode();
1742} 1768}
1743 1769
1744/** 1770/**
@@ -1876,29 +1902,39 @@ void __cpuinit generic_processor_info(int apicid, int version)
1876 } 1902 }
1877#endif 1903#endif
1878 1904
1879#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64) 1905#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
1880 /* are we being called early in kernel startup? */ 1906 early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
1881 if (early_per_cpu_ptr(x86_cpu_to_apicid)) { 1907 early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
1882 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
1883 u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
1884
1885 cpu_to_apicid[cpu] = apicid;
1886 bios_cpu_apicid[cpu] = apicid;
1887 } else {
1888 per_cpu(x86_cpu_to_apicid, cpu) = apicid;
1889 per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
1890 }
1891#endif 1908#endif
1892 1909
1893 set_cpu_possible(cpu, true); 1910 set_cpu_possible(cpu, true);
1894 set_cpu_present(cpu, true); 1911 set_cpu_present(cpu, true);
1895} 1912}
1896 1913
1897#ifdef CONFIG_X86_64
1898int hard_smp_processor_id(void) 1914int hard_smp_processor_id(void)
1899{ 1915{
1900 return read_apic_id(); 1916 return read_apic_id();
1901} 1917}
1918
1919void default_init_apic_ldr(void)
1920{
1921 unsigned long val;
1922
1923 apic_write(APIC_DFR, APIC_DFR_VALUE);
1924 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
1925 val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
1926 apic_write(APIC_LDR, val);
1927}
1928
1929#ifdef CONFIG_X86_32
1930int default_apicid_to_node(int logical_apicid)
1931{
1932#ifdef CONFIG_SMP
1933 return apicid_2_node[hard_smp_processor_id()];
1934#else
1935 return 0;
1936#endif
1937}
1902#endif 1938#endif
1903 1939
1904/* 1940/*
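Throughout apic.c the old compile-time mach_apic.h calls become indirect calls through the installed APIC driver (apic->send_IPI_mask, apic->phys_pkg_id, apic->init_apic_ldr, and so on), with optional callbacks NULL-checked as in connect_bsp_APIC() above. A minimal sketch of that calling convention; the two helper names are invented for illustration:

    static void kick_cpus(const struct cpumask *mask, int vector)
    {
            /* mandatory callback: every driver provides it */
            apic->send_IPI_mask(mask, vector);
    }

    static void maybe_enable_apic_mode(void)
    {
            /* optional callback: may be NULL (apic_bigsmp below leaves it NULL) */
            if (apic->enable_apic_mode)
                    apic->enable_apic_mode();
    }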
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 98807bb095ad..37ba5f85b718 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -301,7 +301,7 @@ extern int (*console_blank_hook)(int);
301 */ 301 */
302#define APM_ZERO_SEGS 302#define APM_ZERO_SEGS
303 303
304#include "apm.h" 304#include <asm/apm.h>
305 305
306/* 306/*
307 * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend. 307 * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend.
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 1d41d3f1edbc..8793ab33e2c1 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -11,7 +11,6 @@
11#include <linux/hardirq.h> 11#include <linux/hardirq.h>
12#include <linux/suspend.h> 12#include <linux/suspend.h>
13#include <linux/kbuild.h> 13#include <linux/kbuild.h>
14#include <asm/pda.h>
15#include <asm/processor.h> 14#include <asm/processor.h>
16#include <asm/segment.h> 15#include <asm/segment.h>
17#include <asm/thread_info.h> 16#include <asm/thread_info.h>
@@ -48,16 +47,6 @@ int main(void)
48#endif 47#endif
49 BLANK(); 48 BLANK();
50#undef ENTRY 49#undef ENTRY
51#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
52 ENTRY(kernelstack);
53 ENTRY(oldrsp);
54 ENTRY(pcurrent);
55 ENTRY(irqcount);
56 ENTRY(cpunumber);
57 ENTRY(irqstackptr);
58 ENTRY(data_offset);
59 BLANK();
60#undef ENTRY
61#ifdef CONFIG_PARAVIRT 50#ifdef CONFIG_PARAVIRT
62 BLANK(); 51 BLANK();
63 OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); 52 OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
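The dropped pda_* offsets reflect the larger theme of this series: the x86_64 PDA fields become ordinary per-cpu variables, reached through the percpu_read()/percpu_write() helpers that the common.c hunks below already use (irq_count, kernel_stack, node_number). A rough sketch of the new access pattern, mirroring the cpu_init() change further down; the function name is hypothetical:

    static void __cpuinit example_cpu_setup(int cpu)
    {
            /* formerly pda->irqcount */
            percpu_write(irq_count, -1);

            /* formerly pda->nodenumber */
            if (percpu_read(node_number) == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
                    percpu_write(node_number, cpu_to_node(cpu));
    }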
diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c
new file mode 100644
index 000000000000..47a62f46afdb
--- /dev/null
+++ b/arch/x86/kernel/bigsmp_32.c
@@ -0,0 +1,266 @@
1/*
2 * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs.
3 * Drives the local APIC in "clustered mode".
4 */
5#define APIC_DEFINITION 1
6#include <linux/threads.h>
7#include <linux/cpumask.h>
8#include <asm/mpspec.h>
9#include <asm/genapic.h>
10#include <asm/fixmap.h>
11#include <asm/apicdef.h>
12#include <asm/ipi.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/dmi.h>
16#include <linux/smp.h>
17
18
19static inline unsigned bigsmp_get_apic_id(unsigned long x)
20{
21 return (x >> 24) & 0xFF;
22}
23
24#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
25
26static inline int bigsmp_apic_id_registered(void)
27{
28 return 1;
29}
30
31static inline const cpumask_t *bigsmp_target_cpus(void)
32{
33#ifdef CONFIG_SMP
34 return &cpu_online_map;
35#else
36 return &cpumask_of_cpu(0);
37#endif
38}
39
40#define APIC_DFR_VALUE (APIC_DFR_FLAT)
41
42static inline unsigned long
43bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
44{
45 return 0;
46}
47
48static inline unsigned long bigsmp_check_apicid_present(int bit)
49{
50 return 1;
51}
52
53static inline unsigned long calculate_ldr(int cpu)
54{
55 unsigned long val, id;
56 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
57 id = xapic_phys_to_log_apicid(cpu);
58 val |= SET_APIC_LOGICAL_ID(id);
59 return val;
60}
61
62/*
63 * Set up the logical destination ID.
64 *
65 * Intel recommends to set DFR, LDR and TPR before enabling
66 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
67 * document number 292116). So here it goes...
68 */
69static inline void bigsmp_init_apic_ldr(void)
70{
71 unsigned long val;
72 int cpu = smp_processor_id();
73
74 apic_write(APIC_DFR, APIC_DFR_VALUE);
75 val = calculate_ldr(cpu);
76 apic_write(APIC_LDR, val);
77}
78
79static inline void bigsmp_setup_apic_routing(void)
80{
81 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
82 "Physflat", nr_ioapics);
83}
84
85static inline int bigsmp_apicid_to_node(int logical_apicid)
86{
87 return apicid_2_node[hard_smp_processor_id()];
88}
89
90static inline int bigsmp_cpu_present_to_apicid(int mps_cpu)
91{
92 if (mps_cpu < nr_cpu_ids)
93 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
94
95 return BAD_APICID;
96}
97
98static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
99{
100 return physid_mask_of_physid(phys_apicid);
101}
102
103extern u8 cpu_2_logical_apicid[];
104/* Mapping from cpu number to logical apicid */
105static inline int bigsmp_cpu_to_logical_apicid(int cpu)
106{
107 if (cpu >= nr_cpu_ids)
108 return BAD_APICID;
109 return cpu_physical_id(cpu);
110}
111
112static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
113{
114 /* For clustered we don't have a good way to do this yet - hack */
115 return physids_promote(0xFFL);
116}
117
118static inline void bigsmp_setup_portio_remap(void)
119{
120}
121
122static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
123{
124 return 1;
125}
126
127/* As we are using single CPU as destination, pick only one CPU here */
128static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
129{
130 return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
131}
132
133static inline unsigned int
134bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
135 const struct cpumask *andmask)
136{
137 int cpu;
138
139 /*
140 * We're using fixed IRQ delivery, can only return one phys APIC ID.
141 * May as well be the first.
142 */
143 for_each_cpu_and(cpu, cpumask, andmask) {
144 if (cpumask_test_cpu(cpu, cpu_online_mask))
145 break;
146 }
147 if (cpu < nr_cpu_ids)
148 return bigsmp_cpu_to_logical_apicid(cpu);
149
150 return BAD_APICID;
151}
152
153static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
154{
155 return cpuid_apic >> index_msb;
156}
157
158static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
159{
160 default_send_IPI_mask_sequence_phys(mask, vector);
161}
162
163static inline void bigsmp_send_IPI_allbutself(int vector)
164{
165 default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
166}
167
168static inline void bigsmp_send_IPI_all(int vector)
169{
170 bigsmp_send_IPI_mask(cpu_online_mask, vector);
171}
172
173static int dmi_bigsmp; /* can be set by dmi scanners */
174
175static int hp_ht_bigsmp(const struct dmi_system_id *d)
176{
177 printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
178 dmi_bigsmp = 1;
179 return 0;
180}
181
182
183static const struct dmi_system_id bigsmp_dmi_table[] = {
184 { hp_ht_bigsmp, "HP ProLiant DL760 G2",
185 { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
186 DMI_MATCH(DMI_BIOS_VERSION, "P44-"),}
187 },
188
189 { hp_ht_bigsmp, "HP ProLiant DL740",
190 { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
191 DMI_MATCH(DMI_BIOS_VERSION, "P47-"),}
192 },
193 { }
194};
195
196static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
197{
198 cpus_clear(*retmask);
199 cpu_set(cpu, *retmask);
200}
201
202static int probe_bigsmp(void)
203{
204 if (def_to_bigsmp)
205 dmi_bigsmp = 1;
206 else
207 dmi_check_system(bigsmp_dmi_table);
208 return dmi_bigsmp;
209}
210
211struct genapic apic_bigsmp = {
212
213 .name = "bigsmp",
214 .probe = probe_bigsmp,
215 .acpi_madt_oem_check = NULL,
216 .apic_id_registered = bigsmp_apic_id_registered,
217
218 .irq_delivery_mode = dest_Fixed,
219 /* phys delivery to target CPU: */
220 .irq_dest_mode = 0,
221
222 .target_cpus = bigsmp_target_cpus,
223 .disable_esr = 1,
224 .dest_logical = 0,
225 .check_apicid_used = bigsmp_check_apicid_used,
226 .check_apicid_present = bigsmp_check_apicid_present,
227
228 .vector_allocation_domain = bigsmp_vector_allocation_domain,
229 .init_apic_ldr = bigsmp_init_apic_ldr,
230
231 .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
232 .setup_apic_routing = bigsmp_setup_apic_routing,
233 .multi_timer_check = NULL,
234 .apicid_to_node = bigsmp_apicid_to_node,
235 .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid,
236 .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid,
237 .apicid_to_cpu_present = bigsmp_apicid_to_cpu_present,
238 .setup_portio_remap = NULL,
239 .check_phys_apicid_present = bigsmp_check_phys_apicid_present,
240 .enable_apic_mode = NULL,
241 .phys_pkg_id = bigsmp_phys_pkg_id,
242 .mps_oem_check = NULL,
243
244 .get_apic_id = bigsmp_get_apic_id,
245 .set_apic_id = NULL,
246 .apic_id_mask = 0xFF << 24,
247
248 .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid,
249 .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and,
250
251 .send_IPI_mask = bigsmp_send_IPI_mask,
252 .send_IPI_mask_allbutself = NULL,
253 .send_IPI_allbutself = bigsmp_send_IPI_allbutself,
254 .send_IPI_all = bigsmp_send_IPI_all,
255 .send_IPI_self = default_send_IPI_self,
256
257 .wakeup_cpu = NULL,
258 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
259 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
260
261 .wait_for_init_deassert = default_wait_for_init_deassert,
262
263 .smp_callin_clear_local_apic = NULL,
264 .store_NMI_vector = NULL,
265 .inquire_remote_apic = default_inquire_remote_apic,
266};
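apic_bigsmp fills in the complete struct genapic interface, including a probe() hook. The Makefile hunk earlier adds probe_32.c, which is not shown in this section, so the selection loop below is only a hedged illustration of how such a driver table could be walked; the array name and the global "apic" pointer assignment are assumptions:

    extern struct genapic *apic;            /* the active driver, assumed global */

    static struct genapic *apic_probe[] __initdata = {
            &apic_bigsmp,
            /* ... other 32-bit drivers ... */
            NULL,
    };

    static void __init pick_apic_driver(void)
    {
            int i;

            for (i = 0; apic_probe[i]; i++) {
                    if (apic_probe[i]->probe && apic_probe[i]->probe()) {
                            apic = apic_probe[i];
                            break;
                    }
            }
    }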
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 2cf23634b6d9..e48640cfac0c 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -7,7 +7,7 @@
7#include <asm/pat.h> 7#include <asm/pat.h>
8#include <asm/processor.h> 8#include <asm/processor.h>
9 9
10#include <mach_apic.h> 10#include <asm/genapic.h>
11 11
12struct cpuid_bit { 12struct cpuid_bit {
13 u16 feature; 13 u16 feature;
@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
69 */ 69 */
70void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) 70void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
71{ 71{
72#ifdef CONFIG_X86_SMP 72#ifdef CONFIG_SMP
73 unsigned int eax, ebx, ecx, edx, sub_index; 73 unsigned int eax, ebx, ecx, edx, sub_index;
74 unsigned int ht_mask_width, core_plus_mask_width; 74 unsigned int ht_mask_width, core_plus_mask_width;
75 unsigned int core_select_mask, core_level_siblings; 75 unsigned int core_select_mask, core_level_siblings;
@@ -116,22 +116,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
116 116
117 core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width; 117 core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
118 118
119#ifdef CONFIG_X86_32 119 c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
120 c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
121 & core_select_mask; 120 & core_select_mask;
122 c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width); 121 c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
123 /* 122 /*
124 * Reinit the apicid, now that we have extended initial_apicid. 123 * Reinit the apicid, now that we have extended initial_apicid.
125 */ 124 */
126 c->apicid = phys_pkg_id(c->initial_apicid, 0); 125 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
127#else 126
128 c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
129 c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
130 /*
131 * Reinit the apicid, now that we have extended initial_apicid.
132 */
133 c->apicid = phys_pkg_id(0);
134#endif
135 c->x86_max_cores = (core_level_siblings / smp_num_siblings); 127 c->x86_max_cores = (core_level_siblings / smp_num_siblings);
136 128
137 129
@@ -143,37 +135,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
143 return; 135 return;
144#endif 136#endif
145} 137}
146
147#ifdef CONFIG_X86_PAT
148void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
149{
150 if (!cpu_has_pat)
151 pat_disable("PAT not supported by CPU.");
152
153 switch (c->x86_vendor) {
154 case X86_VENDOR_INTEL:
155 /*
156 * There is a known erratum on Pentium III and Core Solo
157 * and Core Duo CPUs.
158 * " Page with PAT set to WC while associated MTRR is UC
159 * may consolidate to UC "
160 * Because of this erratum, it is better to stick with
161 * setting WC in MTRR rather than using PAT on these CPUs.
162 *
163 * Enable PAT WC only on P4, Core 2 or later CPUs.
164 */
165 if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
166 return;
167
168 pat_disable("PAT WC disabled due to known CPU erratum.");
169 return;
170
171 case X86_VENDOR_AMD:
172 case X86_VENDOR_CENTAUR:
173 case X86_VENDOR_TRANSMETA:
174 return;
175 }
176
177 pat_disable("PAT disabled. Not yet verified on this CPU type.");
178}
179#endif
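With the unified apic->phys_pkg_id() (a plain right shift for drivers such as bigsmp above), the topology IDs in detect_extended_topology() fall out of simple mask arithmetic. A worked example with hypothetical widths, not values from any real CPU:

    /* Assume:  initial_apicid       = 0x35 (0b110101)
     *          ht_mask_width        = 1    (one SMT bit)
     *          core_plus_mask_width = 3    (SMT + core bits)
     *
     *   core_select_mask = (~(-1 << 3)) >> 1      = 0b11
     *   cpu_core_id      = (0x35 >> 1) & 0b11     = 26 & 3 = 2
     *   phys_proc_id     =  0x35 >> 3             = 6
     *   apicid           = phys_pkg_id(0x35, 0)   = 0x35
     */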
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7c878f6aa919..ff4d7b9e32e4 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -12,7 +12,7 @@
12# include <asm/cacheflush.h> 12# include <asm/cacheflush.h>
13#endif 13#endif
14 14
15#include <mach_apic.h> 15#include <asm/genapic.h>
16 16
17#include "cpu.h" 17#include "cpu.h"
18 18
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 83492b1f93b1..cbcdb796d47f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -21,14 +21,16 @@
21#include <asm/asm.h> 21#include <asm/asm.h>
22#include <asm/numa.h> 22#include <asm/numa.h>
23#include <asm/smp.h> 23#include <asm/smp.h>
24#include <asm/cpu.h>
25#include <asm/cpumask.h>
24#ifdef CONFIG_X86_LOCAL_APIC 26#ifdef CONFIG_X86_LOCAL_APIC
25#include <asm/mpspec.h> 27#include <asm/mpspec.h>
26#include <asm/apic.h> 28#include <asm/apic.h>
27#include <mach_apic.h>
28#include <asm/genapic.h> 29#include <asm/genapic.h>
30#include <asm/genapic.h>
31#include <asm/uv/uv.h>
29#endif 32#endif
30 33
31#include <asm/pda.h>
32#include <asm/pgtable.h> 34#include <asm/pgtable.h>
33#include <asm/processor.h> 35#include <asm/processor.h>
34#include <asm/desc.h> 36#include <asm/desc.h>
@@ -50,6 +52,15 @@ cpumask_var_t cpu_initialized_mask;
50/* representing cpus for which sibling maps can be computed */ 52/* representing cpus for which sibling maps can be computed */
51cpumask_var_t cpu_sibling_setup_mask; 53cpumask_var_t cpu_sibling_setup_mask;
52 54
55/* correctly size the local cpu masks */
56void __init setup_cpu_local_masks(void)
57{
58 alloc_bootmem_cpumask_var(&cpu_initialized_mask);
59 alloc_bootmem_cpumask_var(&cpu_callin_mask);
60 alloc_bootmem_cpumask_var(&cpu_callout_mask);
61 alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
62}
63
53#else /* CONFIG_X86_32 */ 64#else /* CONFIG_X86_32 */
54 65
55cpumask_t cpu_callin_map; 66cpumask_t cpu_callin_map;
@@ -62,23 +73,23 @@ cpumask_t cpu_sibling_setup_map;
62 73
63static struct cpu_dev *this_cpu __cpuinitdata; 74static struct cpu_dev *this_cpu __cpuinitdata;
64 75
76DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
65#ifdef CONFIG_X86_64 77#ifdef CONFIG_X86_64
66/* We need valid kernel segments for data and code in long mode too 78 /*
67 * IRET will check the segment types kkeil 2000/10/28 79 * We need valid kernel segments for data and code in long mode too
68 * Also sysret mandates a special GDT layout 80 * IRET will check the segment types kkeil 2000/10/28
69 */ 81 * Also sysret mandates a special GDT layout
70/* The TLS descriptors are currently at a different place compared to i386. 82 *
71 Hopefully nobody expects them at a fixed place (Wine?) */ 83 * The TLS descriptors are currently at a different place compared to i386.
72DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { 84 * Hopefully nobody expects them at a fixed place (Wine?)
85 */
73 [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } }, 86 [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
74 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } }, 87 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
75 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } }, 88 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
76 [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } }, 89 [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
77 [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } }, 90 [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
78 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } }, 91 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
79} };
80#else 92#else
81DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
82 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, 93 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
83 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, 94 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
84 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, 95 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
@@ -110,9 +121,9 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
110 [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } }, 121 [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
111 122
112 [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } }, 123 [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
113 [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } }, 124 [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
114} };
115#endif 125#endif
126} };
116EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); 127EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
117 128
118#ifdef CONFIG_X86_32 129#ifdef CONFIG_X86_32
@@ -213,6 +224,49 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
213#endif 224#endif
214 225
215/* 226/*
227 * Some CPU features depend on higher CPUID levels, which may not always
228 * be available due to CPUID level capping or broken virtualization
229 * software. Add those features to this table to auto-disable them.
230 */
231struct cpuid_dependent_feature {
232 u32 feature;
233 u32 level;
234};
235static const struct cpuid_dependent_feature __cpuinitconst
236cpuid_dependent_features[] = {
237 { X86_FEATURE_MWAIT, 0x00000005 },
238 { X86_FEATURE_DCA, 0x00000009 },
239 { X86_FEATURE_XSAVE, 0x0000000d },
240 { 0, 0 }
241};
242
243static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
244{
245 const struct cpuid_dependent_feature *df;
246 for (df = cpuid_dependent_features; df->feature; df++) {
247 /*
248 * Note: cpuid_level is set to -1 if unavailable, but
249 * extended_cpuid_level is set to 0 if unavailable
250 * and the legitimate extended levels are all negative
251 * when signed; hence the weird messing around with
252 * signs here...
253 */
254 if (cpu_has(c, df->feature) &&
255 ((s32)df->level < 0 ?
256 (u32)df->level > (u32)c->extended_cpuid_level :
257 (s32)df->level > (s32)c->cpuid_level)) {
258 clear_cpu_cap(c, df->feature);
259 if (warn)
260 printk(KERN_WARNING
261 "CPU: CPU feature %s disabled "
262 "due to lack of CPUID level 0x%x\n",
263 x86_cap_flags[df->feature],
264 df->level);
265 }
266 }
267}
268
269/*
216 * Naming convention should be: <Name> [(<Codename>)] 270 * Naming convention should be: <Name> [(<Codename>)]
217 * This table only is used unless init_<vendor>() below doesn't set it; 271 * This table only is used unless init_<vendor>() below doesn't set it;
218 * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used 272 * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
@@ -242,18 +296,28 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
242 296
243__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; 297__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
244 298
299void load_percpu_segment(int cpu)
300{
301#ifdef CONFIG_X86_32
302 loadsegment(fs, __KERNEL_PERCPU);
303#else
304 loadsegment(gs, 0);
305 wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
306#endif
307}
308
245/* Current gdt points %fs at the "master" per-cpu area: after this, 309/* Current gdt points %fs at the "master" per-cpu area: after this,
246 * it's on the real one. */ 310 * it's on the real one. */
247void switch_to_new_gdt(void) 311void switch_to_new_gdt(int cpu)
248{ 312{
249 struct desc_ptr gdt_descr; 313 struct desc_ptr gdt_descr;
250 314
251 gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id()); 315 gdt_descr.address = (long)get_cpu_gdt_table(cpu);
252 gdt_descr.size = GDT_SIZE - 1; 316 gdt_descr.size = GDT_SIZE - 1;
253 load_gdt(&gdt_descr); 317 load_gdt(&gdt_descr);
254#ifdef CONFIG_X86_32 318 /* Reload the per-cpu base */
255 asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory"); 319
256#endif 320 load_percpu_segment(cpu);
257} 321}
258 322
259static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; 323static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
@@ -383,11 +447,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
383 } 447 }
384 448
385 index_msb = get_count_order(smp_num_siblings); 449 index_msb = get_count_order(smp_num_siblings);
386#ifdef CONFIG_X86_64 450 c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
387 c->phys_proc_id = phys_pkg_id(index_msb);
388#else
389 c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
390#endif
391 451
392 smp_num_siblings = smp_num_siblings / c->x86_max_cores; 452 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
393 453
@@ -395,13 +455,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
395 455
396 core_bits = get_count_order(c->x86_max_cores); 456 core_bits = get_count_order(c->x86_max_cores);
397 457
398#ifdef CONFIG_X86_64 458 c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
399 c->cpu_core_id = phys_pkg_id(index_msb) &
400 ((1 << core_bits) - 1); 459 ((1 << core_bits) - 1);
401#else
402 c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
403 ((1 << core_bits) - 1);
404#endif
405 } 460 }
406 461
407out: 462out:
@@ -570,11 +625,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
570 if (this_cpu->c_early_init) 625 if (this_cpu->c_early_init)
571 this_cpu->c_early_init(c); 626 this_cpu->c_early_init(c);
572 627
573 validate_pat_support(c);
574
575#ifdef CONFIG_SMP 628#ifdef CONFIG_SMP
576 c->cpu_index = boot_cpu_id; 629 c->cpu_index = boot_cpu_id;
577#endif 630#endif
631 filter_cpuid_features(c, false);
578} 632}
579 633
580void __init early_cpu_init(void) 634void __init early_cpu_init(void)
@@ -637,7 +691,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
637 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; 691 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
638#ifdef CONFIG_X86_32 692#ifdef CONFIG_X86_32
639# ifdef CONFIG_X86_HT 693# ifdef CONFIG_X86_HT
640 c->apicid = phys_pkg_id(c->initial_apicid, 0); 694 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
641# else 695# else
642 c->apicid = c->initial_apicid; 696 c->apicid = c->initial_apicid;
643# endif 697# endif
@@ -684,7 +738,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
684 this_cpu->c_identify(c); 738 this_cpu->c_identify(c);
685 739
686#ifdef CONFIG_X86_64 740#ifdef CONFIG_X86_64
687 c->apicid = phys_pkg_id(0); 741 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
688#endif 742#endif
689 743
690 /* 744 /*
@@ -708,6 +762,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
708 * we do "generic changes." 762 * we do "generic changes."
709 */ 763 */
710 764
765 /* Filter out anything that depends on CPUID levels we don't have */
766 filter_cpuid_features(c, true);
767
711 /* If the model name is still unset, do table lookup. */ 768 /* If the model name is still unset, do table lookup. */
712 if (!c->x86_model_id[0]) { 769 if (!c->x86_model_id[0]) {
713 char *p; 770 char *p;
@@ -877,54 +934,26 @@ static __init int setup_disablecpuid(char *arg)
877__setup("clearcpuid=", setup_disablecpuid); 934__setup("clearcpuid=", setup_disablecpuid);
878 935
879#ifdef CONFIG_X86_64 936#ifdef CONFIG_X86_64
880struct x8664_pda **_cpu_pda __read_mostly;
881EXPORT_SYMBOL(_cpu_pda);
882
883struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; 937struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
884 938
885static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss; 939DEFINE_PER_CPU_FIRST(union irq_stack_union,
940 irq_stack_union) __aligned(PAGE_SIZE);
941#ifdef CONFIG_SMP
942DEFINE_PER_CPU(char *, irq_stack_ptr); /* will be set during per cpu init */
943#else
944DEFINE_PER_CPU(char *, irq_stack_ptr) =
945 per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
946#endif
886 947
887void __cpuinit pda_init(int cpu) 948DEFINE_PER_CPU(unsigned long, kernel_stack) =
888{ 949 (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
889 struct x8664_pda *pda = cpu_pda(cpu); 950EXPORT_PER_CPU_SYMBOL(kernel_stack);
890 951
891 /* Setup up data that may be needed in __get_free_pages early */ 952DEFINE_PER_CPU(unsigned int, irq_count) = -1;
892 loadsegment(fs, 0);
893 loadsegment(gs, 0);
894 /* Memory clobbers used to order PDA accessed */
895 mb();
896 wrmsrl(MSR_GS_BASE, pda);
897 mb();
898
899 pda->cpunumber = cpu;
900 pda->irqcount = -1;
901 pda->kernelstack = (unsigned long)stack_thread_info() -
902 PDA_STACKOFFSET + THREAD_SIZE;
903 pda->active_mm = &init_mm;
904 pda->mmu_state = 0;
905
906 if (cpu == 0) {
907 /* others are initialized in smpboot.c */
908 pda->pcurrent = &init_task;
909 pda->irqstackptr = boot_cpu_stack;
910 pda->irqstackptr += IRQSTACKSIZE - 64;
911 } else {
912 if (!pda->irqstackptr) {
913 pda->irqstackptr = (char *)
914 __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
915 if (!pda->irqstackptr)
916 panic("cannot allocate irqstack for cpu %d",
917 cpu);
918 pda->irqstackptr += IRQSTACKSIZE - 64;
919 }
920
921 if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
922 pda->nodenumber = cpu_to_node(cpu);
923 }
924}
925 953
926static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + 954static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
927 DEBUG_STKSZ] __page_aligned_bss; 955 [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
956 __aligned(PAGE_SIZE);
928 957
929extern asmlinkage void ignore_sysret(void); 958extern asmlinkage void ignore_sysret(void);
930 959
@@ -982,15 +1011,14 @@ void __cpuinit cpu_init(void)
982 struct tss_struct *t = &per_cpu(init_tss, cpu); 1011 struct tss_struct *t = &per_cpu(init_tss, cpu);
983 struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu); 1012 struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
984 unsigned long v; 1013 unsigned long v;
985 char *estacks = NULL;
986 struct task_struct *me; 1014 struct task_struct *me;
987 int i; 1015 int i;
988 1016
989 /* CPU 0 is initialised in head64.c */ 1017#ifdef CONFIG_NUMA
990 if (cpu != 0) 1018 if (cpu != 0 && percpu_read(node_number) == 0 &&
991 pda_init(cpu); 1019 cpu_to_node(cpu) != NUMA_NO_NODE)
992 else 1020 percpu_write(node_number, cpu_to_node(cpu));
993 estacks = boot_exception_stacks; 1021#endif
994 1022
995 me = current; 1023 me = current;
996 1024
@@ -1006,7 +1034,9 @@ void __cpuinit cpu_init(void)
1006 * and set up the GDT descriptor: 1034 * and set up the GDT descriptor:
1007 */ 1035 */
1008 1036
1009 switch_to_new_gdt(); 1037 switch_to_new_gdt(cpu);
1038 loadsegment(fs, 0);
1039
1010 load_idt((const struct desc_ptr *)&idt_descr); 1040 load_idt((const struct desc_ptr *)&idt_descr);
1011 1041
1012 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); 1042 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
@@ -1024,18 +1054,13 @@ void __cpuinit cpu_init(void)
1024 * set up and load the per-CPU TSS 1054 * set up and load the per-CPU TSS
1025 */ 1055 */
1026 if (!orig_ist->ist[0]) { 1056 if (!orig_ist->ist[0]) {
1027 static const unsigned int order[N_EXCEPTION_STACKS] = { 1057 static const unsigned int sizes[N_EXCEPTION_STACKS] = {
1028 [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, 1058 [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
1029 [DEBUG_STACK - 1] = DEBUG_STACK_ORDER 1059 [DEBUG_STACK - 1] = DEBUG_STKSZ
1030 }; 1060 };
1061 char *estacks = per_cpu(exception_stacks, cpu);
1031 for (v = 0; v < N_EXCEPTION_STACKS; v++) { 1062 for (v = 0; v < N_EXCEPTION_STACKS; v++) {
1032 if (cpu) { 1063 estacks += sizes[v];
1033 estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
1034 if (!estacks)
1035 panic("Cannot allocate exception "
1036 "stack %ld %d\n", v, cpu);
1037 }
1038 estacks += PAGE_SIZE << order[v];
1039 orig_ist->ist[v] = t->x86_tss.ist[v] = 1064 orig_ist->ist[v] = t->x86_tss.ist[v] =
1040 (unsigned long)estacks; 1065 (unsigned long)estacks;
1041 } 1066 }
@@ -1069,22 +1094,19 @@ void __cpuinit cpu_init(void)
1069 */ 1094 */
1070 if (kgdb_connected && arch_kgdb_ops.correct_hw_break) 1095 if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
1071 arch_kgdb_ops.correct_hw_break(); 1096 arch_kgdb_ops.correct_hw_break();
1072 else { 1097 else
1073#endif 1098#endif
1074 /* 1099 {
1075 * Clear all 6 debug registers: 1100 /*
1076 */ 1101 * Clear all 6 debug registers:
1077 1102 */
1078 set_debugreg(0UL, 0); 1103 set_debugreg(0UL, 0);
1079 set_debugreg(0UL, 1); 1104 set_debugreg(0UL, 1);
1080 set_debugreg(0UL, 2); 1105 set_debugreg(0UL, 2);
1081 set_debugreg(0UL, 3); 1106 set_debugreg(0UL, 3);
1082 set_debugreg(0UL, 6); 1107 set_debugreg(0UL, 6);
1083 set_debugreg(0UL, 7); 1108 set_debugreg(0UL, 7);
1084#ifdef CONFIG_KGDB
1085 /* If the kgdb is connected no debug regs should be altered. */
1086 } 1109 }
1087#endif
1088 1110
1089 fpu_init(); 1111 fpu_init();
1090 1112
@@ -1114,7 +1136,7 @@ void __cpuinit cpu_init(void)
1114 clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); 1136 clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1115 1137
1116 load_idt(&idt_descr); 1138 load_idt(&idt_descr);
1117 switch_to_new_gdt(); 1139 switch_to_new_gdt(cpu);
1118 1140
1119 /* 1141 /*
1120 * Set up and load the per-CPU TSS and LDT 1142 * Set up and load the per-CPU TSS and LDT
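
The common.c hunks above replace the old per-CPU PDA fields (irqcount, kernelstack, irqstackptr) with ordinary per-CPU variables such as irq_count, kernel_stack and irq_stack_ptr. The userspace C sketch below only illustrates the "one slot per CPU" idea; the array, macro and values are invented and are not the kernel's DEFINE_PER_CPU implementation.

/* Minimal userspace sketch of the per-CPU idea: every former PDA field
 * becomes its own per-CPU slot, indexed by CPU number. */
#include <stdio.h>

#define NR_CPUS 4

/* one slot per CPU, analogous to DEFINE_PER_CPU(unsigned int, irq_count) */
static int irq_count[NR_CPUS];
static unsigned long kernel_stack[NR_CPUS];

#define per_cpu_slot(var, cpu) ((var)[(cpu)])

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu_slot(irq_count, cpu) = -1;	/* "not on an IRQ stack" */

	per_cpu_slot(kernel_stack, 0) = 0xffff8000UL;	/* boot CPU's stack top (made up) */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d irq_count=%d\n", cpu, per_cpu_slot(irq_count, cpu));
	return 0;
}
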
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 24ff26a38ade..1f137a87d4bd 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -24,7 +24,7 @@
24#ifdef CONFIG_X86_LOCAL_APIC 24#ifdef CONFIG_X86_LOCAL_APIC
25#include <asm/mpspec.h> 25#include <asm/mpspec.h>
26#include <asm/apic.h> 26#include <asm/apic.h>
27#include <mach_apic.h> 27#include <asm/genapic.h>
28#endif 28#endif
29 29
30static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) 30static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
@@ -63,6 +63,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
63 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); 63 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
64 } 64 }
65 65
66 /*
67 * There is a known erratum on Pentium III and Core Solo
68 * and Core Duo CPUs.
69 * " Page with PAT set to WC while associated MTRR is UC
70 * may consolidate to UC "
71 * Because of this erratum, it is better to stick with
72 * setting WC in MTRR rather than using PAT on these CPUs.
73 *
74 * Enable PAT WC only on P4, Core 2 or later CPUs.
75 */
76 if (c->x86 == 6 && c->x86_model < 15)
77 clear_cpu_cap(c, X86_FEATURE_PAT);
66} 78}
67 79
68#ifdef CONFIG_X86_32 80#ifdef CONFIG_X86_32
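
The intel.c hunk above clears the PAT capability bit on family 6 models below 15 because of the WC/UC consolidation erratum. The sketch below shows the same family/model gate applied to a toy feature word; the struct, bit position and helper names are invented for illustration and are not the kernel's cpu_cap API.

#include <stdio.h>

struct fake_cpuinfo {
	int family;
	int model;
	unsigned long caps;
};

#define FEATURE_PAT (1UL << 0)	/* illustrative bit position only */

static void filter_pat(struct fake_cpuinfo *c)
{
	if (c->family == 6 && c->model < 15)
		c->caps &= ~FEATURE_PAT;	/* analogous to clear_cpu_cap() */
}

int main(void)
{
	struct fake_cpuinfo c = { .family = 6, .model = 13, .caps = FEATURE_PAT };

	filter_pat(&c);
	printf("PAT usable: %s\n", (c.caps & FEATURE_PAT) ? "yes" : "no");
	return 0;
}
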
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index da299eb85fc0..7293508d8f5c 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -147,7 +147,16 @@ struct _cpuid4_info {
147 union _cpuid4_leaf_ecx ecx; 147 union _cpuid4_leaf_ecx ecx;
148 unsigned long size; 148 unsigned long size;
149 unsigned long can_disable; 149 unsigned long can_disable;
150 cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */ 150 DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
151};
152
153/* subset of above _cpuid4_info w/o shared_cpu_map */
154struct _cpuid4_info_regs {
155 union _cpuid4_leaf_eax eax;
156 union _cpuid4_leaf_ebx ebx;
157 union _cpuid4_leaf_ecx ecx;
158 unsigned long size;
159 unsigned long can_disable;
151}; 160};
152 161
153#ifdef CONFIG_PCI 162#ifdef CONFIG_PCI
@@ -278,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
278} 287}
279 288
280static void __cpuinit 289static void __cpuinit
281amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf) 290amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
282{ 291{
283 if (index < 3) 292 if (index < 3)
284 return; 293 return;
@@ -286,7 +295,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
286} 295}
287 296
288static int 297static int
289__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) 298__cpuinit cpuid4_cache_lookup_regs(int index,
299 struct _cpuid4_info_regs *this_leaf)
290{ 300{
291 union _cpuid4_leaf_eax eax; 301 union _cpuid4_leaf_eax eax;
292 union _cpuid4_leaf_ebx ebx; 302 union _cpuid4_leaf_ebx ebx;
@@ -314,6 +324,15 @@ __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
314 return 0; 324 return 0;
315} 325}
316 326
327static int
328__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
329{
330 struct _cpuid4_info_regs *leaf_regs =
331 (struct _cpuid4_info_regs *)this_leaf;
332
333 return cpuid4_cache_lookup_regs(index, leaf_regs);
334}
335
317static int __cpuinit find_num_cache_leaves(void) 336static int __cpuinit find_num_cache_leaves(void)
318{ 337{
319 unsigned int eax, ebx, ecx, edx; 338 unsigned int eax, ebx, ecx, edx;
@@ -353,11 +372,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
353 * parameters cpuid leaf to find the cache details 372 * parameters cpuid leaf to find the cache details
354 */ 373 */
355 for (i = 0; i < num_cache_leaves; i++) { 374 for (i = 0; i < num_cache_leaves; i++) {
356 struct _cpuid4_info this_leaf; 375 struct _cpuid4_info_regs this_leaf;
357
358 int retval; 376 int retval;
359 377
360 retval = cpuid4_cache_lookup(i, &this_leaf); 378 retval = cpuid4_cache_lookup_regs(i, &this_leaf);
361 if (retval >= 0) { 379 if (retval >= 0) {
362 switch(this_leaf.eax.split.level) { 380 switch(this_leaf.eax.split.level) {
363 case 1: 381 case 1:
@@ -506,17 +524,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
506 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; 524 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
507 525
508 if (num_threads_sharing == 1) 526 if (num_threads_sharing == 1)
509 cpu_set(cpu, this_leaf->shared_cpu_map); 527 cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
510 else { 528 else {
511 index_msb = get_count_order(num_threads_sharing); 529 index_msb = get_count_order(num_threads_sharing);
512 530
513 for_each_online_cpu(i) { 531 for_each_online_cpu(i) {
514 if (cpu_data(i).apicid >> index_msb == 532 if (cpu_data(i).apicid >> index_msb ==
515 c->apicid >> index_msb) { 533 c->apicid >> index_msb) {
516 cpu_set(i, this_leaf->shared_cpu_map); 534 cpumask_set_cpu(i,
535 to_cpumask(this_leaf->shared_cpu_map));
517 if (i != cpu && per_cpu(cpuid4_info, i)) { 536 if (i != cpu && per_cpu(cpuid4_info, i)) {
518 sibling_leaf = CPUID4_INFO_IDX(i, index); 537 sibling_leaf =
519 cpu_set(cpu, sibling_leaf->shared_cpu_map); 538 CPUID4_INFO_IDX(i, index);
539 cpumask_set_cpu(cpu, to_cpumask(
540 sibling_leaf->shared_cpu_map));
520 } 541 }
521 } 542 }
522 } 543 }
@@ -528,9 +549,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
528 int sibling; 549 int sibling;
529 550
530 this_leaf = CPUID4_INFO_IDX(cpu, index); 551 this_leaf = CPUID4_INFO_IDX(cpu, index);
531 for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { 552 for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
532 sibling_leaf = CPUID4_INFO_IDX(sibling, index); 553 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
533 cpu_clear(cpu, sibling_leaf->shared_cpu_map); 554 cpumask_clear_cpu(cpu,
555 to_cpumask(sibling_leaf->shared_cpu_map));
534 } 556 }
535} 557}
536#else 558#else
@@ -635,8 +657,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
635 int n = 0; 657 int n = 0;
636 658
637 if (len > 1) { 659 if (len > 1) {
638 cpumask_t *mask = &this_leaf->shared_cpu_map; 660 const struct cpumask *mask;
639 661
662 mask = to_cpumask(this_leaf->shared_cpu_map);
640 n = type? 663 n = type?
641 cpulist_scnprintf(buf, len-2, mask) : 664 cpulist_scnprintf(buf, len-2, mask) :
642 cpumask_scnprintf(buf, len-2, mask); 665 cpumask_scnprintf(buf, len-2, mask);
@@ -699,7 +722,8 @@ static struct pci_dev *get_k8_northbridge(int node)
699 722
700static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf) 723static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
701{ 724{
702 int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); 725 const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
726 int node = cpu_to_node(cpumask_first(mask));
703 struct pci_dev *dev = NULL; 727 struct pci_dev *dev = NULL;
704 ssize_t ret = 0; 728 ssize_t ret = 0;
705 int i; 729 int i;
@@ -733,7 +757,8 @@ static ssize_t
733store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, 757store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
734 size_t count) 758 size_t count)
735{ 759{
736 int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); 760 const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
761 int node = cpu_to_node(cpumask_first(mask));
737 struct pci_dev *dev = NULL; 762 struct pci_dev *dev = NULL;
738 unsigned int ret, index, val; 763 unsigned int ret, index, val;
739 764
@@ -878,7 +903,7 @@ err_out:
878 return -ENOMEM; 903 return -ENOMEM;
879} 904}
880 905
881static cpumask_t cache_dev_map = CPU_MASK_NONE; 906static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
882 907
883/* Add/Remove cache interface for CPU device */ 908/* Add/Remove cache interface for CPU device */
884static int __cpuinit cache_add_dev(struct sys_device * sys_dev) 909static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
@@ -918,7 +943,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
918 } 943 }
919 kobject_uevent(&(this_object->kobj), KOBJ_ADD); 944 kobject_uevent(&(this_object->kobj), KOBJ_ADD);
920 } 945 }
921 cpu_set(cpu, cache_dev_map); 946 cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
922 947
923 kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD); 948 kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
924 return 0; 949 return 0;
@@ -931,9 +956,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
931 956
932 if (per_cpu(cpuid4_info, cpu) == NULL) 957 if (per_cpu(cpuid4_info, cpu) == NULL)
933 return; 958 return;
934 if (!cpu_isset(cpu, cache_dev_map)) 959 if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
935 return; 960 return;
936 cpu_clear(cpu, cache_dev_map); 961 cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
937 962
938 for (i = 0; i < num_cache_leaves; i++) 963 for (i = 0; i < num_cache_leaves; i++)
939 kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); 964 kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
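
The cacheinfo changes above swap the embedded cpumask_t fields for DECLARE_BITMAP(..., NR_CPUS) plus cpumask_set_cpu()/cpumask_test_cpu()/cpumask_clear_cpu() on to_cpumask(...). A minimal userspace sketch of such a bitmap-backed mask follows; the helpers are hand-rolled stand-ins, not the kernel's cpumask implementation.

#include <stdio.h>
#include <string.h>

#define NR_CPUS		64
#define BITS_PER_LONG	(8 * sizeof(long))
#define BITMAP_LONGS	((NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long shared_cpu_map[BITMAP_LONGS];

static void mask_set(unsigned long *map, int cpu)
{
	map[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

static int mask_test(const unsigned long *map, int cpu)
{
	return (map[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
}

static void mask_clear(unsigned long *map, int cpu)
{
	map[cpu / BITS_PER_LONG] &= ~(1UL << (cpu % BITS_PER_LONG));
}

int main(void)
{
	memset(shared_cpu_map, 0, sizeof(shared_cpu_map));
	mask_set(shared_cpu_map, 3);
	printf("cpu3 shares cache: %d\n", mask_test(shared_cpu_map, 3));
	mask_clear(shared_cpu_map, 3);
	printf("cpu3 shares cache: %d\n", mask_test(shared_cpu_map, 3));
	return 0;
}
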
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 8ae8c4ff094d..4772e91e8246 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = {
67struct threshold_bank { 67struct threshold_bank {
68 struct kobject *kobj; 68 struct kobject *kobj;
69 struct threshold_block *blocks; 69 struct threshold_block *blocks;
70 cpumask_t cpus; 70 cpumask_var_t cpus;
71}; 71};
72static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); 72static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
73 73
@@ -481,7 +481,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
481 481
482#ifdef CONFIG_SMP 482#ifdef CONFIG_SMP
483 if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ 483 if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
484 i = first_cpu(per_cpu(cpu_core_map, cpu)); 484 i = cpumask_first(&per_cpu(cpu_core_map, cpu));
485 485
486 /* first core not up yet */ 486 /* first core not up yet */
487 if (cpu_data(i).cpu_core_id) 487 if (cpu_data(i).cpu_core_id)
@@ -501,7 +501,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
501 if (err) 501 if (err)
502 goto out; 502 goto out;
503 503
504 b->cpus = per_cpu(cpu_core_map, cpu); 504 cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
505 per_cpu(threshold_banks, cpu)[bank] = b; 505 per_cpu(threshold_banks, cpu)[bank] = b;
506 goto out; 506 goto out;
507 } 507 }
@@ -512,15 +512,20 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
512 err = -ENOMEM; 512 err = -ENOMEM;
513 goto out; 513 goto out;
514 } 514 }
515 if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
516 kfree(b);
517 err = -ENOMEM;
518 goto out;
519 }
515 520
516 b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj); 521 b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
517 if (!b->kobj) 522 if (!b->kobj)
518 goto out_free; 523 goto out_free;
519 524
520#ifndef CONFIG_SMP 525#ifndef CONFIG_SMP
521 b->cpus = CPU_MASK_ALL; 526 cpumask_setall(b->cpus);
522#else 527#else
523 b->cpus = per_cpu(cpu_core_map, cpu); 528 cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
524#endif 529#endif
525 530
526 per_cpu(threshold_banks, cpu)[bank] = b; 531 per_cpu(threshold_banks, cpu)[bank] = b;
@@ -529,7 +534,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
529 if (err) 534 if (err)
530 goto out_free; 535 goto out_free;
531 536
532 for_each_cpu_mask_nr(i, b->cpus) { 537 for_each_cpu(i, b->cpus) {
533 if (i == cpu) 538 if (i == cpu)
534 continue; 539 continue;
535 540
@@ -545,6 +550,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
545 550
546out_free: 551out_free:
547 per_cpu(threshold_banks, cpu)[bank] = NULL; 552 per_cpu(threshold_banks, cpu)[bank] = NULL;
553 free_cpumask_var(b->cpus);
548 kfree(b); 554 kfree(b);
549out: 555out:
550 return err; 556 return err;
@@ -619,7 +625,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
619#endif 625#endif
620 626
621 /* remove all sibling symlinks before unregistering */ 627 /* remove all sibling symlinks before unregistering */
622 for_each_cpu_mask_nr(i, b->cpus) { 628 for_each_cpu(i, b->cpus) {
623 if (i == cpu) 629 if (i == cpu)
624 continue; 630 continue;
625 631
@@ -632,6 +638,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
632free_out: 638free_out:
633 kobject_del(b->kobj); 639 kobject_del(b->kobj);
634 kobject_put(b->kobj); 640 kobject_put(b->kobj);
641 free_cpumask_var(b->cpus);
635 kfree(b); 642 kfree(b);
636 per_cpu(threshold_banks, cpu)[bank] = NULL; 643 per_cpu(threshold_banks, cpu)[bank] = NULL;
637} 644}
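
threshold_create_bank() above now allocates the bank's CPU mask with alloc_cpumask_var() and must release it with free_cpumask_var() on every error path and in threshold_remove_bank(). The sketch below mirrors that allocate-then-unwind pattern with plain malloc/free; the struct and sizes are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bank {
	unsigned long *cpus;	/* dynamically allocated mask */
	size_t mask_bytes;
};

static struct bank *bank_create(size_t mask_bytes)
{
	struct bank *b = calloc(1, sizeof(*b));

	if (!b)
		return NULL;

	b->cpus = calloc(1, mask_bytes);	/* like alloc_cpumask_var() */
	if (!b->cpus) {
		free(b);			/* undo the first allocation */
		return NULL;
	}
	b->mask_bytes = mask_bytes;
	return b;
}

static void bank_destroy(struct bank *b)
{
	if (!b)
		return;
	free(b->cpus);				/* like free_cpumask_var() */
	free(b);
}

int main(void)
{
	struct bank *b = bank_create(128 / 8);

	if (!b) {
		fprintf(stderr, "out of memory\n");
		return 1;
	}
	memset(b->cpus, 0xff, b->mask_bytes);	/* like cpumask_setall() */
	bank_destroy(b);
	return 0;
}
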
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index 4b48f251fd39..5e8c79e748a6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -7,6 +7,7 @@
7#include <linux/interrupt.h> 7#include <linux/interrupt.h>
8#include <linux/percpu.h> 8#include <linux/percpu.h>
9#include <asm/processor.h> 9#include <asm/processor.h>
10#include <asm/apic.h>
10#include <asm/msr.h> 11#include <asm/msr.h>
11#include <asm/mce.h> 12#include <asm/mce.h>
12#include <asm/hw_irq.h> 13#include <asm/hw_irq.h>
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index c689d19e35ab..ad7f2a696f4a 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -24,11 +24,11 @@
24#include <asm/apic.h> 24#include <asm/apic.h>
25#include <asm/hpet.h> 25#include <asm/hpet.h>
26#include <linux/kdebug.h> 26#include <linux/kdebug.h>
27#include <asm/smp.h> 27#include <asm/cpu.h>
28#include <asm/reboot.h> 28#include <asm/reboot.h>
29#include <asm/virtext.h> 29#include <asm/virtext.h>
30 30
31#include <mach_ipi.h> 31#include <asm/genapic.h>
32 32
33 33
34#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) 34#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index c302d0707048..d35db5993fd6 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -106,7 +106,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
106 const struct stacktrace_ops *ops, void *data) 106 const struct stacktrace_ops *ops, void *data)
107{ 107{
108 const unsigned cpu = get_cpu(); 108 const unsigned cpu = get_cpu();
109 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; 109 unsigned long *irq_stack_end =
110 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
110 unsigned used = 0; 111 unsigned used = 0;
111 struct thread_info *tinfo; 112 struct thread_info *tinfo;
112 int graph = 0; 113 int graph = 0;
@@ -160,23 +161,23 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
160 stack = (unsigned long *) estack_end[-2]; 161 stack = (unsigned long *) estack_end[-2];
161 continue; 162 continue;
162 } 163 }
163 if (irqstack_end) { 164 if (irq_stack_end) {
164 unsigned long *irqstack; 165 unsigned long *irq_stack;
165 irqstack = irqstack_end - 166 irq_stack = irq_stack_end -
166 (IRQSTACKSIZE - 64) / sizeof(*irqstack); 167 (IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
167 168
168 if (stack >= irqstack && stack < irqstack_end) { 169 if (stack >= irq_stack && stack < irq_stack_end) {
169 if (ops->stack(data, "IRQ") < 0) 170 if (ops->stack(data, "IRQ") < 0)
170 break; 171 break;
171 bp = print_context_stack(tinfo, stack, bp, 172 bp = print_context_stack(tinfo, stack, bp,
172 ops, data, irqstack_end, &graph); 173 ops, data, irq_stack_end, &graph);
173 /* 174 /*
174 * We link to the next stack (which would be 175 * We link to the next stack (which would be
175 * the process stack normally) the last 176 * the process stack normally) the last
176 * pointer (index -1 to end) in the IRQ stack: 177 * pointer (index -1 to end) in the IRQ stack:
177 */ 178 */
178 stack = (unsigned long *) (irqstack_end[-1]); 179 stack = (unsigned long *) (irq_stack_end[-1]);
179 irqstack_end = NULL; 180 irq_stack_end = NULL;
180 ops->stack(data, "EOI"); 181 ops->stack(data, "EOI");
181 continue; 182 continue;
182 } 183 }
@@ -199,10 +200,10 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
199 unsigned long *stack; 200 unsigned long *stack;
200 int i; 201 int i;
201 const int cpu = smp_processor_id(); 202 const int cpu = smp_processor_id();
202 unsigned long *irqstack_end = 203 unsigned long *irq_stack_end =
203 (unsigned long *) (cpu_pda(cpu)->irqstackptr); 204 (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
204 unsigned long *irqstack = 205 unsigned long *irq_stack =
205 (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE); 206 (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);
206 207
207 /* 208 /*
208 * debugging aid: "show_stack(NULL, NULL);" prints the 209 * debugging aid: "show_stack(NULL, NULL);" prints the
@@ -218,9 +219,9 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
218 219
219 stack = sp; 220 stack = sp;
220 for (i = 0; i < kstack_depth_to_print; i++) { 221 for (i = 0; i < kstack_depth_to_print; i++) {
221 if (stack >= irqstack && stack <= irqstack_end) { 222 if (stack >= irq_stack && stack <= irq_stack_end) {
222 if (stack == irqstack_end) { 223 if (stack == irq_stack_end) {
223 stack = (unsigned long *) (irqstack_end[-1]); 224 stack = (unsigned long *) (irq_stack_end[-1]);
224 printk(" <EOI> "); 225 printk(" <EOI> ");
225 } 226 }
226 } else { 227 } else {
@@ -241,7 +242,7 @@ void show_registers(struct pt_regs *regs)
241 int i; 242 int i;
242 unsigned long sp; 243 unsigned long sp;
243 const int cpu = smp_processor_id(); 244 const int cpu = smp_processor_id();
244 struct task_struct *cur = cpu_pda(cpu)->pcurrent; 245 struct task_struct *cur = current;
245 246
246 sp = regs->sp; 247 sp = regs->sp;
247 printk("CPU %d ", cpu); 248 printk("CPU %d ", cpu);
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 1119d247fe11..b205272ad394 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -366,10 +366,12 @@ void __init efi_init(void)
366 SMBIOS_TABLE_GUID)) { 366 SMBIOS_TABLE_GUID)) {
367 efi.smbios = config_tables[i].table; 367 efi.smbios = config_tables[i].table;
368 printk(" SMBIOS=0x%lx ", config_tables[i].table); 368 printk(" SMBIOS=0x%lx ", config_tables[i].table);
369#ifdef CONFIG_X86_UV
369 } else if (!efi_guidcmp(config_tables[i].guid, 370 } else if (!efi_guidcmp(config_tables[i].guid,
370 UV_SYSTEM_TABLE_GUID)) { 371 UV_SYSTEM_TABLE_GUID)) {
371 efi.uv_systab = config_tables[i].table; 372 efi.uv_systab = config_tables[i].table;
372 printk(" UVsystab=0x%lx ", config_tables[i].table); 373 printk(" UVsystab=0x%lx ", config_tables[i].table);
374#endif
373 } else if (!efi_guidcmp(config_tables[i].guid, 375 } else if (!efi_guidcmp(config_tables[i].guid,
374 HCDP_TABLE_GUID)) { 376 HCDP_TABLE_GUID)) {
375 efi.hcdp = config_tables[i].table; 377 efi.hcdp = config_tables[i].table;
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
index 652c5287215f..a4ee29127fdf 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/kernel/efi_64.c
@@ -36,6 +36,7 @@
36#include <asm/proto.h> 36#include <asm/proto.h>
37#include <asm/efi.h> 37#include <asm/efi.h>
38#include <asm/cacheflush.h> 38#include <asm/cacheflush.h>
39#include <asm/fixmap.h>
39 40
40static pgd_t save_pgd __initdata; 41static pgd_t save_pgd __initdata;
41static unsigned long efi_flags __initdata; 42static unsigned long efi_flags __initdata;
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 46469029e9d3..65efd42454be 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -672,7 +672,7 @@ common_interrupt:
672ENDPROC(common_interrupt) 672ENDPROC(common_interrupt)
673 CFI_ENDPROC 673 CFI_ENDPROC
674 674
675#define BUILD_INTERRUPT(name, nr) \ 675#define BUILD_INTERRUPT3(name, nr, fn) \
676ENTRY(name) \ 676ENTRY(name) \
677 RING0_INT_FRAME; \ 677 RING0_INT_FRAME; \
678 pushl $~(nr); \ 678 pushl $~(nr); \
@@ -680,13 +680,15 @@ ENTRY(name) \
680 SAVE_ALL; \ 680 SAVE_ALL; \
681 TRACE_IRQS_OFF \ 681 TRACE_IRQS_OFF \
682 movl %esp,%eax; \ 682 movl %esp,%eax; \
683 call smp_##name; \ 683 call fn; \
684 jmp ret_from_intr; \ 684 jmp ret_from_intr; \
685 CFI_ENDPROC; \ 685 CFI_ENDPROC; \
686ENDPROC(name) 686ENDPROC(name)
687 687
688#define BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(name, nr, smp_##name)
689
688/* The include is where all of the SMP etc. interrupts come from */ 690/* The include is where all of the SMP etc. interrupts come from */
689#include "entry_arch.h" 691#include <asm/entry_arch.h>
690 692
691ENTRY(coprocessor_error) 693ENTRY(coprocessor_error)
692 RING0_INT_FRAME 694 RING0_INT_FRAME
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index a1346217e43c..fbcf96b295ff 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -52,6 +52,7 @@
52#include <asm/irqflags.h> 52#include <asm/irqflags.h>
53#include <asm/paravirt.h> 53#include <asm/paravirt.h>
54#include <asm/ftrace.h> 54#include <asm/ftrace.h>
55#include <asm/percpu.h>
55 56
56/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ 57/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
57#include <linux/elf-em.h> 58#include <linux/elf-em.h>
@@ -209,7 +210,7 @@ ENTRY(native_usergs_sysret64)
209 210
210 /* %rsp:at FRAMEEND */ 211 /* %rsp:at FRAMEEND */
211 .macro FIXUP_TOP_OF_STACK tmp offset=0 212 .macro FIXUP_TOP_OF_STACK tmp offset=0
212 movq %gs:pda_oldrsp,\tmp 213 movq PER_CPU_VAR(old_rsp),\tmp
213 movq \tmp,RSP+\offset(%rsp) 214 movq \tmp,RSP+\offset(%rsp)
214 movq $__USER_DS,SS+\offset(%rsp) 215 movq $__USER_DS,SS+\offset(%rsp)
215 movq $__USER_CS,CS+\offset(%rsp) 216 movq $__USER_CS,CS+\offset(%rsp)
@@ -220,7 +221,7 @@ ENTRY(native_usergs_sysret64)
220 221
221 .macro RESTORE_TOP_OF_STACK tmp offset=0 222 .macro RESTORE_TOP_OF_STACK tmp offset=0
222 movq RSP+\offset(%rsp),\tmp 223 movq RSP+\offset(%rsp),\tmp
223 movq \tmp,%gs:pda_oldrsp 224 movq \tmp,PER_CPU_VAR(old_rsp)
224 movq EFLAGS+\offset(%rsp),\tmp 225 movq EFLAGS+\offset(%rsp),\tmp
225 movq \tmp,R11+\offset(%rsp) 226 movq \tmp,R11+\offset(%rsp)
226 .endm 227 .endm
@@ -336,15 +337,15 @@ ENTRY(save_args)
336 je 1f 337 je 1f
337 SWAPGS 338 SWAPGS
338 /* 339 /*
339 * irqcount is used to check if a CPU is already on an interrupt stack 340 * irq_count is used to check if a CPU is already on an interrupt stack
340 * or not. While this is essentially redundant with preempt_count it is 341 * or not. While this is essentially redundant with preempt_count it is
341 * a little cheaper to use a separate counter in the PDA (short of 342 * a little cheaper to use a separate counter in the PDA (short of
342 * moving irq_enter into assembly, which would be too much work) 343 * moving irq_enter into assembly, which would be too much work)
343 */ 344 */
3441: incl %gs:pda_irqcount 3451: incl PER_CPU_VAR(irq_count)
345 jne 2f 346 jne 2f
346 popq_cfi %rax /* move return address... */ 347 popq_cfi %rax /* move return address... */
347 mov %gs:pda_irqstackptr,%rsp 348 mov PER_CPU_VAR(irq_stack_ptr),%rsp
348 EMPTY_FRAME 0 349 EMPTY_FRAME 0
349 pushq_cfi %rbp /* backlink for unwinder */ 350 pushq_cfi %rbp /* backlink for unwinder */
350 pushq_cfi %rax /* ... to the new stack */ 351 pushq_cfi %rax /* ... to the new stack */
@@ -409,6 +410,8 @@ END(save_paranoid)
409ENTRY(ret_from_fork) 410ENTRY(ret_from_fork)
410 DEFAULT_FRAME 411 DEFAULT_FRAME
411 412
413 LOCK ; btr $TIF_FORK,TI_flags(%r8)
414
412 push kernel_eflags(%rip) 415 push kernel_eflags(%rip)
413 CFI_ADJUST_CFA_OFFSET 8 416 CFI_ADJUST_CFA_OFFSET 8
414 popf # reset kernel eflags 417 popf # reset kernel eflags
@@ -468,7 +471,7 @@ END(ret_from_fork)
468ENTRY(system_call) 471ENTRY(system_call)
469 CFI_STARTPROC simple 472 CFI_STARTPROC simple
470 CFI_SIGNAL_FRAME 473 CFI_SIGNAL_FRAME
471 CFI_DEF_CFA rsp,PDA_STACKOFFSET 474 CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
472 CFI_REGISTER rip,rcx 475 CFI_REGISTER rip,rcx
473 /*CFI_REGISTER rflags,r11*/ 476 /*CFI_REGISTER rflags,r11*/
474 SWAPGS_UNSAFE_STACK 477 SWAPGS_UNSAFE_STACK
@@ -479,8 +482,8 @@ ENTRY(system_call)
479 */ 482 */
480ENTRY(system_call_after_swapgs) 483ENTRY(system_call_after_swapgs)
481 484
482 movq %rsp,%gs:pda_oldrsp 485 movq %rsp,PER_CPU_VAR(old_rsp)
483 movq %gs:pda_kernelstack,%rsp 486 movq PER_CPU_VAR(kernel_stack),%rsp
484 /* 487 /*
485 * No need to follow this irqs off/on section - it's straight 488 * No need to follow this irqs off/on section - it's straight
486 * and short: 489 * and short:
@@ -523,7 +526,7 @@ sysret_check:
523 CFI_REGISTER rip,rcx 526 CFI_REGISTER rip,rcx
524 RESTORE_ARGS 0,-ARG_SKIP,1 527 RESTORE_ARGS 0,-ARG_SKIP,1
525 /*CFI_REGISTER rflags,r11*/ 528 /*CFI_REGISTER rflags,r11*/
526 movq %gs:pda_oldrsp, %rsp 529 movq PER_CPU_VAR(old_rsp), %rsp
527 USERGS_SYSRET64 530 USERGS_SYSRET64
528 531
529 CFI_RESTORE_STATE 532 CFI_RESTORE_STATE
@@ -833,11 +836,11 @@ common_interrupt:
833 XCPT_FRAME 836 XCPT_FRAME
834 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ 837 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
835 interrupt do_IRQ 838 interrupt do_IRQ
836 /* 0(%rsp): oldrsp-ARGOFFSET */ 839 /* 0(%rsp): old_rsp-ARGOFFSET */
837ret_from_intr: 840ret_from_intr:
838 DISABLE_INTERRUPTS(CLBR_NONE) 841 DISABLE_INTERRUPTS(CLBR_NONE)
839 TRACE_IRQS_OFF 842 TRACE_IRQS_OFF
840 decl %gs:pda_irqcount 843 decl PER_CPU_VAR(irq_count)
841 leaveq 844 leaveq
842 CFI_DEF_CFA_REGISTER rsp 845 CFI_DEF_CFA_REGISTER rsp
843 CFI_ADJUST_CFA_OFFSET -8 846 CFI_ADJUST_CFA_OFFSET -8
@@ -982,8 +985,10 @@ apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
982 irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt 985 irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
983#endif 986#endif
984 987
988#ifdef CONFIG_X86_UV
985apicinterrupt UV_BAU_MESSAGE \ 989apicinterrupt UV_BAU_MESSAGE \
986 uv_bau_message_intr1 uv_bau_message_interrupt 990 uv_bau_message_intr1 uv_bau_message_interrupt
991#endif
987apicinterrupt LOCAL_TIMER_VECTOR \ 992apicinterrupt LOCAL_TIMER_VECTOR \
988 apic_timer_interrupt smp_apic_timer_interrupt 993 apic_timer_interrupt smp_apic_timer_interrupt
989 994
@@ -1073,10 +1078,10 @@ ENTRY(\sym)
1073 TRACE_IRQS_OFF 1078 TRACE_IRQS_OFF
1074 movq %rsp,%rdi /* pt_regs pointer */ 1079 movq %rsp,%rdi /* pt_regs pointer */
1075 xorl %esi,%esi /* no error code */ 1080 xorl %esi,%esi /* no error code */
1076 movq %gs:pda_data_offset, %rbp 1081 PER_CPU(init_tss, %rbp)
1077 subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp) 1082 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
1078 call \do_sym 1083 call \do_sym
1079 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp) 1084 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
1080 jmp paranoid_exit /* %ebx: no swapgs flag */ 1085 jmp paranoid_exit /* %ebx: no swapgs flag */
1081 CFI_ENDPROC 1086 CFI_ENDPROC
1082END(\sym) 1087END(\sym)
@@ -1138,7 +1143,7 @@ ENTRY(native_load_gs_index)
1138 CFI_STARTPROC 1143 CFI_STARTPROC
1139 pushf 1144 pushf
1140 CFI_ADJUST_CFA_OFFSET 8 1145 CFI_ADJUST_CFA_OFFSET 8
1141 DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI)) 1146 DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
1142 SWAPGS 1147 SWAPGS
1143gs_change: 1148gs_change:
1144 movl %edi,%gs 1149 movl %edi,%gs
@@ -1260,14 +1265,14 @@ ENTRY(call_softirq)
1260 CFI_REL_OFFSET rbp,0 1265 CFI_REL_OFFSET rbp,0
1261 mov %rsp,%rbp 1266 mov %rsp,%rbp
1262 CFI_DEF_CFA_REGISTER rbp 1267 CFI_DEF_CFA_REGISTER rbp
1263 incl %gs:pda_irqcount 1268 incl PER_CPU_VAR(irq_count)
1264 cmove %gs:pda_irqstackptr,%rsp 1269 cmove PER_CPU_VAR(irq_stack_ptr),%rsp
1265 push %rbp # backlink for old unwinder 1270 push %rbp # backlink for old unwinder
1266 call __do_softirq 1271 call __do_softirq
1267 leaveq 1272 leaveq
1268 CFI_DEF_CFA_REGISTER rsp 1273 CFI_DEF_CFA_REGISTER rsp
1269 CFI_ADJUST_CFA_OFFSET -8 1274 CFI_ADJUST_CFA_OFFSET -8
1270 decl %gs:pda_irqcount 1275 decl PER_CPU_VAR(irq_count)
1271 ret 1276 ret
1272 CFI_ENDPROC 1277 CFI_ENDPROC
1273END(call_softirq) 1278END(call_softirq)
@@ -1297,15 +1302,15 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
1297 movq %rdi, %rsp # we don't return, adjust the stack frame 1302 movq %rdi, %rsp # we don't return, adjust the stack frame
1298 CFI_ENDPROC 1303 CFI_ENDPROC
1299 DEFAULT_FRAME 1304 DEFAULT_FRAME
130011: incl %gs:pda_irqcount 130511: incl PER_CPU_VAR(irq_count)
1301 movq %rsp,%rbp 1306 movq %rsp,%rbp
1302 CFI_DEF_CFA_REGISTER rbp 1307 CFI_DEF_CFA_REGISTER rbp
1303 cmovzq %gs:pda_irqstackptr,%rsp 1308 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
1304 pushq %rbp # backlink for old unwinder 1309 pushq %rbp # backlink for old unwinder
1305 call xen_evtchn_do_upcall 1310 call xen_evtchn_do_upcall
1306 popq %rsp 1311 popq %rsp
1307 CFI_DEF_CFA_REGISTER rsp 1312 CFI_DEF_CFA_REGISTER rsp
1308 decl %gs:pda_irqcount 1313 decl PER_CPU_VAR(irq_count)
1309 jmp error_exit 1314 jmp error_exit
1310 CFI_ENDPROC 1315 CFI_ENDPROC
1311END(do_hypervisor_callback) 1316END(do_hypervisor_callback)
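
The entry_64.S changes above keep the irq_count convention (formerly pda_irqcount): the counter starts at -1, the increment that brings it to zero marks the first interrupt level and triggers the switch to the IRQ stack, and nested interrupts stay where they are. A small C sketch of that convention, with invented names:

#include <stdio.h>

static int irq_count = -1;		/* per-CPU in the real code */

static void enter_irq(void)
{
	if (++irq_count == 0)		/* incl PER_CPU_VAR(irq_count); jne ... */
		printf("first level: switch to the IRQ stack\n");
	else
		printf("nested level %d: stay on the IRQ stack\n", irq_count);
}

static void leave_irq(void)
{
	irq_count--;			/* decl PER_CPU_VAR(irq_count) */
}

int main(void)
{
	enter_irq();	/* outer interrupt */
	enter_irq();	/* nested interrupt */
	leave_irq();
	leave_irq();
	printf("irq_count back to %d\n", irq_count);
	return 0;
}
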
diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c
index 53699c931ad4..d6184c12a182 100644
--- a/arch/x86/kernel/es7000_32.c
+++ b/arch/x86/kernel/es7000_32.c
@@ -40,7 +40,6 @@
40#include <asm/smp.h> 40#include <asm/smp.h>
41#include <asm/atomic.h> 41#include <asm/atomic.h>
42#include <asm/apicdef.h> 42#include <asm/apicdef.h>
43#include <mach_mpparse.h>
44#include <asm/genapic.h> 43#include <asm/genapic.h>
45#include <asm/setup.h> 44#include <asm/setup.h>
46 45
@@ -182,20 +181,16 @@ static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
182 return 0; 181 return 0;
183} 182}
184 183
185static void noop_wait_for_deassert(atomic_t *deassert_not_used)
186{
187}
188
189static int __init es7000_update_genapic(void) 184static int __init es7000_update_genapic(void)
190{ 185{
191 genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip; 186 apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
192 187
193 /* MPENTIUMIII */ 188 /* MPENTIUMIII */
194 if (boot_cpu_data.x86 == 6 && 189 if (boot_cpu_data.x86 == 6 &&
195 (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) { 190 (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) {
196 es7000_update_genapic_to_cluster(); 191 es7000_update_genapic_to_cluster();
197 genapic->wait_for_init_deassert = noop_wait_for_deassert; 192 apic->wait_for_init_deassert = NULL;
198 genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip; 193 apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
199 } 194 }
200 195
201 return 0; 196 return 0;
@@ -359,20 +354,449 @@ es7000_mip_write(struct mip_reg *mip_reg)
359 return status; 354 return status;
360} 355}
361 356
362void __init 357void __init es7000_enable_apic_mode(void)
363es7000_sw_apic(void) 358{
364{ 359 struct mip_reg es7000_mip_reg;
365 if (es7000_plat) { 360 int mip_status;
366 int mip_status; 361
367 struct mip_reg es7000_mip_reg; 362 if (!es7000_plat)
368
369 printk("ES7000: Enabling APIC mode.\n");
370 memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
371 es7000_mip_reg.off_0 = MIP_SW_APIC;
372 es7000_mip_reg.off_38 = (MIP_VALID);
373 while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0)
374 printk("es7000_sw_apic: command failed, status = %x\n",
375 mip_status);
376 return; 363 return;
364
365 printk("ES7000: Enabling APIC mode.\n");
366 memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
367 es7000_mip_reg.off_0 = MIP_SW_APIC;
368 es7000_mip_reg.off_38 = MIP_VALID;
369
370 while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0) {
371 printk("es7000_enable_apic_mode: command failed, status = %x\n",
372 mip_status);
373 }
374}
375
376/*
377 * APIC driver for the Unisys ES7000 chipset.
378 */
379#define APIC_DEFINITION 1
380#include <linux/threads.h>
381#include <linux/cpumask.h>
382#include <asm/mpspec.h>
383#include <asm/genapic.h>
384#include <asm/fixmap.h>
385#include <asm/apicdef.h>
386#include <linux/kernel.h>
387#include <linux/string.h>
388#include <linux/init.h>
389#include <linux/acpi.h>
390#include <linux/smp.h>
391#include <asm/ipi.h>
392
393#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
394#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio)
395#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */
396
397#define APIC_DFR_VALUE (APIC_DFR_FLAT)
398
399extern void es7000_enable_apic_mode(void);
400extern int apic_version [MAX_APICS];
401extern u8 cpu_2_logical_apicid[];
402extern unsigned int boot_cpu_physical_apicid;
403
404extern int parse_unisys_oem (char *oemptr);
405extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
406extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
407extern void setup_unisys(void);
408
409#define apicid_cluster(apicid) (apicid & 0xF0)
410#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
411
412static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
413{
414 /* Careful. Some cpus do not strictly honor the set of cpus
415 * specified in the interrupt destination when using lowest
416 * priority interrupt delivery mode.
417 *
418 * In particular there was a hyperthreading cpu observed to
419 * deliver interrupts to the wrong hyperthread when only one
420 * hyperthread was specified in the interrupt destination.
421 */
422 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
423}
424
425
426static void es7000_wait_for_init_deassert(atomic_t *deassert)
427{
428#ifndef CONFIG_ES7000_CLUSTERED_APIC
429 while (!atomic_read(deassert))
430 cpu_relax();
431#endif
432 return;
433}
434
435static unsigned int es7000_get_apic_id(unsigned long x)
436{
437 return (x >> 24) & 0xFF;
438}
439
440#ifdef CONFIG_ACPI
441static int es7000_check_dsdt(void)
442{
443 struct acpi_table_header header;
444
445 if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
446 !strncmp(header.oem_id, "UNISYS", 6))
447 return 1;
448 return 0;
449}
450#endif
451
452static void es7000_send_IPI_mask(const struct cpumask *mask, int vector)
453{
454 default_send_IPI_mask_sequence_phys(mask, vector);
455}
456
457static void es7000_send_IPI_allbutself(int vector)
458{
459 default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
460}
461
462static void es7000_send_IPI_all(int vector)
463{
464 es7000_send_IPI_mask(cpu_online_mask, vector);
465}
466
467static int es7000_apic_id_registered(void)
468{
469 return 1;
470}
471
472static const cpumask_t *target_cpus_cluster(void)
473{
474 return &CPU_MASK_ALL;
475}
476
477static const cpumask_t *es7000_target_cpus(void)
478{
479 return &cpumask_of_cpu(smp_processor_id());
480}
481
482static unsigned long
483es7000_check_apicid_used(physid_mask_t bitmap, int apicid)
484{
485 return 0;
486}
487static unsigned long es7000_check_apicid_present(int bit)
488{
489 return physid_isset(bit, phys_cpu_present_map);
490}
491
492static unsigned long calculate_ldr(int cpu)
493{
494 unsigned long id = xapic_phys_to_log_apicid(cpu);
495
496 return (SET_APIC_LOGICAL_ID(id));
497}
498
499/*
500 * Set up the logical destination ID.
501 *
502 * Intel recommends to set DFR, LdR and TPR before enabling
503 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
504 * document number 292116). So here it goes...
505 */
506static void es7000_init_apic_ldr_cluster(void)
507{
508 unsigned long val;
509 int cpu = smp_processor_id();
510
511 apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
512 val = calculate_ldr(cpu);
513 apic_write(APIC_LDR, val);
514}
515
516static void es7000_init_apic_ldr(void)
517{
518 unsigned long val;
519 int cpu = smp_processor_id();
520
521 apic_write(APIC_DFR, APIC_DFR_VALUE);
522 val = calculate_ldr(cpu);
523 apic_write(APIC_LDR, val);
524}
525
526static void es7000_setup_apic_routing(void)
527{
528 int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
529 printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
530 (apic_version[apic] == 0x14) ?
531 "Physical Cluster" : "Logical Cluster",
532 nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
533}
534
535static int es7000_apicid_to_node(int logical_apicid)
536{
537 return 0;
538}
539
540
541static int es7000_cpu_present_to_apicid(int mps_cpu)
542{
543 if (!mps_cpu)
544 return boot_cpu_physical_apicid;
545 else if (mps_cpu < nr_cpu_ids)
546 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
547 else
548 return BAD_APICID;
549}
550
551static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid)
552{
553 static int id = 0;
554 physid_mask_t mask;
555
556 mask = physid_mask_of_physid(id);
557 ++id;
558
559 return mask;
560}
561
562/* Mapping from cpu number to logical apicid */
563static int es7000_cpu_to_logical_apicid(int cpu)
564{
565#ifdef CONFIG_SMP
566 if (cpu >= nr_cpu_ids)
567 return BAD_APICID;
568 return (int)cpu_2_logical_apicid[cpu];
569#else
570 return logical_smp_processor_id();
571#endif
572}
573
574static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map)
575{
576 /* For clustered we don't have a good way to do this yet - hack */
577 return physids_promote(0xff);
578}
579
580static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
581{
582 boot_cpu_physical_apicid = read_apic_id();
583 return (1);
584}
585
586static unsigned int
587es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
588{
589 int cpus_found = 0;
590 int num_bits_set;
591 int apicid;
592 int cpu;
593
594 num_bits_set = cpumask_weight(cpumask);
595 /* Return id to all */
596 if (num_bits_set == nr_cpu_ids)
597 return 0xFF;
598 /*
599 * The cpus in the mask must all be in the same apic cluster. If they are
600 * not on the same apicid cluster, return the default value of target_cpus():
601 */
602 cpu = cpumask_first(cpumask);
603 apicid = es7000_cpu_to_logical_apicid(cpu);
604
605 while (cpus_found < num_bits_set) {
606 if (cpumask_test_cpu(cpu, cpumask)) {
607 int new_apicid = es7000_cpu_to_logical_apicid(cpu);
608
609 if (apicid_cluster(apicid) !=
610 apicid_cluster(new_apicid)) {
611 printk ("%s: Not a valid mask!\n", __func__);
612
613 return 0xFF;
614 }
615 apicid = new_apicid;
616 cpus_found++;
617 }
618 cpu++;
377 } 619 }
620 return apicid;
378} 621}
622
623static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
624{
625 int cpus_found = 0;
626 int num_bits_set;
627 int apicid;
628 int cpu;
629
630 num_bits_set = cpus_weight(*cpumask);
631 /* Return id to all */
632 if (num_bits_set == nr_cpu_ids)
633 return es7000_cpu_to_logical_apicid(0);
634 /*
635 * The cpus in the mask must all be in the same apic cluster. If they are
636 * not on the same apicid cluster, return the default value of target_cpus():
637 */
638 cpu = first_cpu(*cpumask);
639 apicid = es7000_cpu_to_logical_apicid(cpu);
640 while (cpus_found < num_bits_set) {
641 if (cpu_isset(cpu, *cpumask)) {
642 int new_apicid = es7000_cpu_to_logical_apicid(cpu);
643
644 if (apicid_cluster(apicid) !=
645 apicid_cluster(new_apicid)) {
646 printk ("%s: Not a valid mask!\n", __func__);
647
648 return es7000_cpu_to_logical_apicid(0);
649 }
650 apicid = new_apicid;
651 cpus_found++;
652 }
653 cpu++;
654 }
655 return apicid;
656}
657
658static unsigned int
659es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
660 const struct cpumask *andmask)
661{
662 int apicid = es7000_cpu_to_logical_apicid(0);
663 cpumask_var_t cpumask;
664
665 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
666 return apicid;
667
668 cpumask_and(cpumask, inmask, andmask);
669 cpumask_and(cpumask, cpumask, cpu_online_mask);
670 apicid = es7000_cpu_mask_to_apicid(cpumask);
671
672 free_cpumask_var(cpumask);
673
674 return apicid;
675}
676
677static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
678{
679 return cpuid_apic >> index_msb;
680}
681
682void __init es7000_update_genapic_to_cluster(void)
683{
684 apic->target_cpus = target_cpus_cluster;
685 apic->irq_delivery_mode = INT_DELIVERY_MODE_CLUSTER;
686 apic->irq_dest_mode = INT_DEST_MODE_CLUSTER;
687
688 apic->init_apic_ldr = es7000_init_apic_ldr_cluster;
689
690 apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster;
691}
692
693static int probe_es7000(void)
694{
695 /* probed later in mptable/ACPI hooks */
696 return 0;
697}
698
699static __init int
700es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
701{
702 if (mpc->oemptr) {
703 struct mpc_oemtable *oem_table =
704 (struct mpc_oemtable *)mpc->oemptr;
705
706 if (!strncmp(oem, "UNISYS", 6))
707 return parse_unisys_oem((char *)oem_table);
708 }
709 return 0;
710}
711
712#ifdef CONFIG_ACPI
713/* Hook from generic ACPI tables.c */
714static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
715{
716 unsigned long oem_addr = 0;
717 int check_dsdt;
718 int ret = 0;
719
720 /* check dsdt at first to avoid clear fix_map for oem_addr */
721 check_dsdt = es7000_check_dsdt();
722
723 if (!find_unisys_acpi_oem_table(&oem_addr)) {
724 if (check_dsdt)
725 ret = parse_unisys_oem((char *)oem_addr);
726 else {
727 setup_unisys();
728 ret = 1;
729 }
730 /*
731 * we need to unmap it
732 */
733 unmap_unisys_acpi_oem_table(oem_addr);
734 }
735 return ret;
736}
737#else
738static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
739{
740 return 0;
741}
742#endif
743
744
745struct genapic apic_es7000 = {
746
747 .name = "es7000",
748 .probe = probe_es7000,
749 .acpi_madt_oem_check = es7000_acpi_madt_oem_check,
750 .apic_id_registered = es7000_apic_id_registered,
751
752 .irq_delivery_mode = dest_Fixed,
753 /* phys delivery to target CPUs: */
754 .irq_dest_mode = 0,
755
756 .target_cpus = es7000_target_cpus,
757 .disable_esr = 1,
758 .dest_logical = 0,
759 .check_apicid_used = es7000_check_apicid_used,
760 .check_apicid_present = es7000_check_apicid_present,
761
762 .vector_allocation_domain = es7000_vector_allocation_domain,
763 .init_apic_ldr = es7000_init_apic_ldr,
764
765 .ioapic_phys_id_map = es7000_ioapic_phys_id_map,
766 .setup_apic_routing = es7000_setup_apic_routing,
767 .multi_timer_check = NULL,
768 .apicid_to_node = es7000_apicid_to_node,
769 .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
770 .cpu_present_to_apicid = es7000_cpu_present_to_apicid,
771 .apicid_to_cpu_present = es7000_apicid_to_cpu_present,
772 .setup_portio_remap = NULL,
773 .check_phys_apicid_present = es7000_check_phys_apicid_present,
774 .enable_apic_mode = es7000_enable_apic_mode,
775 .phys_pkg_id = es7000_phys_pkg_id,
776 .mps_oem_check = es7000_mps_oem_check,
777
778 .get_apic_id = es7000_get_apic_id,
779 .set_apic_id = NULL,
780 .apic_id_mask = 0xFF << 24,
781
782 .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
783 .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
784
785 .send_IPI_mask = es7000_send_IPI_mask,
786 .send_IPI_mask_allbutself = NULL,
787 .send_IPI_allbutself = es7000_send_IPI_allbutself,
788 .send_IPI_all = es7000_send_IPI_all,
789 .send_IPI_self = default_send_IPI_self,
790
791 .wakeup_cpu = NULL,
792
793 .trampoline_phys_low = 0x467,
794 .trampoline_phys_high = 0x469,
795
796 .wait_for_init_deassert = es7000_wait_for_init_deassert,
797
798 /* Nothing to do for most platforms, since cleared by the INIT cycle: */
799 .smp_callin_clear_local_apic = NULL,
800 .store_NMI_vector = NULL,
801 .inquire_remote_apic = default_inquire_remote_apic,
802};
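
es7000_cpu_mask_to_apicid() above only returns a combined logical APIC ID when every CPU in the mask falls into the same APIC cluster (the upper nibble, apicid & 0xF0); otherwise it falls back to a default. The sketch below reproduces that consistency check on a plain array of IDs; the data and fallback value are illustrative.

#include <stdio.h>

#define APICID_CLUSTER(id)	((id) & 0xF0)

static int mask_to_apicid(const int *apicids, int n, int fallback)
{
	int cluster = APICID_CLUSTER(apicids[0]);
	int apicid = apicids[0];
	int i;

	for (i = 1; i < n; i++) {
		if (APICID_CLUSTER(apicids[i]) != cluster)
			return fallback;	/* mask spans clusters */
		apicid = apicids[i];
	}
	return apicid;
}

int main(void)
{
	int same_cluster[]  = { 0x21, 0x22, 0x23 };
	int mixed_cluster[] = { 0x21, 0x31 };

	printf("same cluster  -> %#x\n", mask_to_apicid(same_cluster, 3, 0xFF));
	printf("mixed cluster -> %#x\n", mask_to_apicid(mixed_cluster, 2, 0xFF));
	return 0;
}
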
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 2bced78b0b8e..820dea5d0ebe 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -29,10 +29,12 @@ extern struct genapic apic_x2xpic_uv_x;
29extern struct genapic apic_x2apic_phys; 29extern struct genapic apic_x2apic_phys;
30extern struct genapic apic_x2apic_cluster; 30extern struct genapic apic_x2apic_cluster;
31 31
32struct genapic __read_mostly *genapic = &apic_flat; 32struct genapic __read_mostly *apic = &apic_flat;
33 33
34static struct genapic *apic_probe[] __initdata = { 34static struct genapic *apic_probe[] __initdata = {
35#ifdef CONFIG_X86_UV
35 &apic_x2apic_uv_x, 36 &apic_x2apic_uv_x,
37#endif
36 &apic_x2apic_phys, 38 &apic_x2apic_phys,
37 &apic_x2apic_cluster, 39 &apic_x2apic_cluster,
38 &apic_physflat, 40 &apic_physflat,
@@ -42,17 +44,17 @@ static struct genapic *apic_probe[] __initdata = {
42/* 44/*
43 * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. 45 * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
44 */ 46 */
45void __init setup_apic_routing(void) 47void __init default_setup_apic_routing(void)
46{ 48{
47 if (genapic == &apic_x2apic_phys || genapic == &apic_x2apic_cluster) { 49 if (apic == &apic_x2apic_phys || apic == &apic_x2apic_cluster) {
48 if (!intr_remapping_enabled) 50 if (!intr_remapping_enabled)
49 genapic = &apic_flat; 51 apic = &apic_flat;
50 } 52 }
51 53
52 if (genapic == &apic_flat) { 54 if (apic == &apic_flat) {
53 if (max_physical_apicid >= 8) 55 if (max_physical_apicid >= 8)
54 genapic = &apic_physflat; 56 apic = &apic_physflat;
55 printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name); 57 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
56 } 58 }
57 59
58 if (x86_quirks->update_genapic) 60 if (x86_quirks->update_genapic)
@@ -63,18 +65,18 @@ void __init setup_apic_routing(void)
63 65
64void apic_send_IPI_self(int vector) 66void apic_send_IPI_self(int vector)
65{ 67{
66 __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); 68 __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
67} 69}
68 70
69int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) 71int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
70{ 72{
71 int i; 73 int i;
72 74
73 for (i = 0; apic_probe[i]; ++i) { 75 for (i = 0; apic_probe[i]; ++i) {
74 if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { 76 if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
75 genapic = apic_probe[i]; 77 apic = apic_probe[i];
76 printk(KERN_INFO "Setting APIC routing to %s.\n", 78 printk(KERN_INFO "Setting APIC routing to %s.\n",
77 genapic->name); 79 apic->name);
78 return 1; 80 return 1;
79 } 81 }
80 } 82 }
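
default_acpi_madt_oem_check() above walks the apic_probe[] table and installs the first driver whose check accepts the firmware's OEM identifier. The sketch below shows the same probe-table pattern with invented driver names and checks.

#include <stdio.h>
#include <string.h>

struct driver {
	const char *name;
	int (*check)(const char *oem_id);
};

static int check_uv(const char *oem_id)   { return strcmp(oem_id, "SGI") == 0; }
static int check_flat(const char *oem_id) { (void)oem_id; return 1; /* default */ }

static struct driver drivers[] = {
	{ "x2apic-uv", check_uv   },
	{ "flat",      check_flat },
};

int main(void)
{
	const char *oem_id = "GENERIC";
	size_t i;

	for (i = 0; i < sizeof(drivers) / sizeof(drivers[0]); i++) {
		if (drivers[i].check(oem_id)) {
			printf("Setting APIC routing to %s\n", drivers[i].name);
			break;
		}
	}
	return 0;
}
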
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index 34185488e4fb..249d2d3c034c 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -19,7 +19,6 @@
19#include <asm/smp.h> 19#include <asm/smp.h>
20#include <asm/ipi.h> 20#include <asm/ipi.h>
21#include <asm/genapic.h> 21#include <asm/genapic.h>
22#include <mach_apicdef.h>
23 22
24#ifdef CONFIG_ACPI 23#ifdef CONFIG_ACPI
25#include <acpi/acpi_bus.h> 24#include <acpi/acpi_bus.h>
@@ -74,7 +73,7 @@ static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
74 unsigned long flags; 73 unsigned long flags;
75 74
76 local_irq_save(flags); 75 local_irq_save(flags);
77 __send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL); 76 __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
78 local_irq_restore(flags); 77 local_irq_restore(flags);
79} 78}
80 79
@@ -85,14 +84,15 @@ static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
85 _flat_send_IPI_mask(mask, vector); 84 _flat_send_IPI_mask(mask, vector);
86} 85}
87 86
88static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, 87static void
89 int vector) 88 flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
90{ 89{
91 unsigned long mask = cpumask_bits(cpumask)[0]; 90 unsigned long mask = cpumask_bits(cpumask)[0];
92 int cpu = smp_processor_id(); 91 int cpu = smp_processor_id();
93 92
94 if (cpu < BITS_PER_LONG) 93 if (cpu < BITS_PER_LONG)
95 clear_bit(cpu, &mask); 94 clear_bit(cpu, &mask);
95
96 _flat_send_IPI_mask(mask, vector); 96 _flat_send_IPI_mask(mask, vector);
97} 97}
98 98
@@ -114,23 +114,27 @@ static void flat_send_IPI_allbutself(int vector)
114 _flat_send_IPI_mask(mask, vector); 114 _flat_send_IPI_mask(mask, vector);
115 } 115 }
116 } else if (num_online_cpus() > 1) { 116 } else if (num_online_cpus() > 1) {
117 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL); 117 __default_send_IPI_shortcut(APIC_DEST_ALLBUT,
118 vector, apic->dest_logical);
118 } 119 }
119} 120}
120 121
121static void flat_send_IPI_all(int vector) 122static void flat_send_IPI_all(int vector)
122{ 123{
123 if (vector == NMI_VECTOR) 124 if (vector == NMI_VECTOR) {
124 flat_send_IPI_mask(cpu_online_mask, vector); 125 flat_send_IPI_mask(cpu_online_mask, vector);
125 else 126 } else {
126 __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); 127 __default_send_IPI_shortcut(APIC_DEST_ALLINC,
128 vector, apic->dest_logical);
129 }
127} 130}
128 131
129static unsigned int get_apic_id(unsigned long x) 132static unsigned int flat_get_apic_id(unsigned long x)
130{ 133{
131 unsigned int id; 134 unsigned int id;
132 135
133 id = (((x)>>24) & 0xFFu); 136 id = (((x)>>24) & 0xFFu);
137
134 return id; 138 return id;
135} 139}
136 140
@@ -146,7 +150,7 @@ static unsigned int read_xapic_id(void)
146{ 150{
147 unsigned int id; 151 unsigned int id;
148 152
149 id = get_apic_id(apic_read(APIC_ID)); 153 id = flat_get_apic_id(apic_read(APIC_ID));
150 return id; 154 return id;
151} 155}
152 156
@@ -169,31 +173,62 @@ static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
169 return mask1 & mask2; 173 return mask1 & mask2;
170} 174}
171 175
172static unsigned int phys_pkg_id(int index_msb) 176static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
173{ 177{
174 return hard_smp_processor_id() >> index_msb; 178 return hard_smp_processor_id() >> index_msb;
175} 179}
176 180
177struct genapic apic_flat = { 181struct genapic apic_flat = {
178 .name = "flat", 182 .name = "flat",
179 .acpi_madt_oem_check = flat_acpi_madt_oem_check, 183 .probe = NULL,
180 .int_delivery_mode = dest_LowestPrio, 184 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
181 .int_dest_mode = (APIC_DEST_LOGICAL != 0), 185 .apic_id_registered = flat_apic_id_registered,
182 .target_cpus = flat_target_cpus, 186
183 .vector_allocation_domain = flat_vector_allocation_domain, 187 .irq_delivery_mode = dest_LowestPrio,
184 .apic_id_registered = flat_apic_id_registered, 188 .irq_dest_mode = 1, /* logical */
185 .init_apic_ldr = flat_init_apic_ldr, 189
186 .send_IPI_all = flat_send_IPI_all, 190 .target_cpus = flat_target_cpus,
187 .send_IPI_allbutself = flat_send_IPI_allbutself, 191 .disable_esr = 0,
188 .send_IPI_mask = flat_send_IPI_mask, 192 .dest_logical = APIC_DEST_LOGICAL,
189 .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, 193 .check_apicid_used = NULL,
190 .send_IPI_self = apic_send_IPI_self, 194 .check_apicid_present = NULL,
191 .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, 195
192 .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and, 196 .vector_allocation_domain = flat_vector_allocation_domain,
193 .phys_pkg_id = phys_pkg_id, 197 .init_apic_ldr = flat_init_apic_ldr,
194 .get_apic_id = get_apic_id, 198
195 .set_apic_id = set_apic_id, 199 .ioapic_phys_id_map = NULL,
196 .apic_id_mask = (0xFFu<<24), 200 .setup_apic_routing = NULL,
201 .multi_timer_check = NULL,
202 .apicid_to_node = NULL,
203 .cpu_to_logical_apicid = NULL,
204 .cpu_present_to_apicid = default_cpu_present_to_apicid,
205 .apicid_to_cpu_present = NULL,
206 .setup_portio_remap = NULL,
207 .check_phys_apicid_present = default_check_phys_apicid_present,
208 .enable_apic_mode = NULL,
209 .phys_pkg_id = flat_phys_pkg_id,
210 .mps_oem_check = NULL,
211
212 .get_apic_id = flat_get_apic_id,
213 .set_apic_id = set_apic_id,
214 .apic_id_mask = 0xFFu << 24,
215
216 .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
217 .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
218
219 .send_IPI_mask = flat_send_IPI_mask,
220 .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
221 .send_IPI_allbutself = flat_send_IPI_allbutself,
222 .send_IPI_all = flat_send_IPI_all,
223 .send_IPI_self = apic_send_IPI_self,
224
225 .wakeup_cpu = NULL,
226 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
227 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
228 .wait_for_init_deassert = NULL,
229 .smp_callin_clear_local_apic = NULL,
230 .store_NMI_vector = NULL,
231 .inquire_remote_apic = NULL,
197}; 232};
198 233
199/* 234/*
@@ -232,18 +267,18 @@ static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
232 267
233static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector) 268static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
234{ 269{
235 send_IPI_mask_sequence(cpumask, vector); 270 default_send_IPI_mask_sequence_phys(cpumask, vector);
236} 271}
237 272
238static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask, 273static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
239 int vector) 274 int vector)
240{ 275{
241 send_IPI_mask_allbutself(cpumask, vector); 276 default_send_IPI_mask_allbutself_phys(cpumask, vector);
242} 277}
243 278
244static void physflat_send_IPI_allbutself(int vector) 279static void physflat_send_IPI_allbutself(int vector)
245{ 280{
246 send_IPI_mask_allbutself(cpu_online_mask, vector); 281 default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
247} 282}
248 283
249static void physflat_send_IPI_all(int vector) 284static void physflat_send_IPI_all(int vector)
@@ -276,32 +311,67 @@ physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
276 * We're using fixed IRQ delivery, can only return one phys APIC ID. 311 * We're using fixed IRQ delivery, can only return one phys APIC ID.
277 * May as well be the first. 312 * May as well be the first.
278 */ 313 */
279 for_each_cpu_and(cpu, cpumask, andmask) 314 for_each_cpu_and(cpu, cpumask, andmask) {
280 if (cpumask_test_cpu(cpu, cpu_online_mask)) 315 if (cpumask_test_cpu(cpu, cpu_online_mask))
281 break; 316 break;
317 }
282 if (cpu < nr_cpu_ids) 318 if (cpu < nr_cpu_ids)
283 return per_cpu(x86_cpu_to_apicid, cpu); 319 return per_cpu(x86_cpu_to_apicid, cpu);
320
284 return BAD_APICID; 321 return BAD_APICID;
285} 322}
286 323
 struct genapic apic_physflat = {
-	.name = "physical flat",
-	.acpi_madt_oem_check = physflat_acpi_madt_oem_check,
-	.int_delivery_mode = dest_Fixed,
-	.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
-	.target_cpus = physflat_target_cpus,
-	.vector_allocation_domain = physflat_vector_allocation_domain,
-	.apic_id_registered = flat_apic_id_registered,
-	.init_apic_ldr = flat_init_apic_ldr,/*not needed, but shouldn't hurt*/
-	.send_IPI_all = physflat_send_IPI_all,
-	.send_IPI_allbutself = physflat_send_IPI_allbutself,
-	.send_IPI_mask = physflat_send_IPI_mask,
-	.send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
-	.send_IPI_self = apic_send_IPI_self,
-	.cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
-	.phys_pkg_id = phys_pkg_id,
-	.get_apic_id = get_apic_id,
-	.set_apic_id = set_apic_id,
-	.apic_id_mask = (0xFFu<<24),
+
+	.name = "physical flat",
+	.probe = NULL,
+	.acpi_madt_oem_check = physflat_acpi_madt_oem_check,
+	.apic_id_registered = flat_apic_id_registered,
+
+	.irq_delivery_mode = dest_Fixed,
+	.irq_dest_mode = 0, /* physical */
+
+	.target_cpus = physflat_target_cpus,
+	.disable_esr = 0,
+	.dest_logical = 0,
+	.check_apicid_used = NULL,
+	.check_apicid_present = NULL,
+
+	.vector_allocation_domain = physflat_vector_allocation_domain,
+	/* not needed, but shouldn't hurt: */
+	.init_apic_ldr = flat_init_apic_ldr,
+
+	.ioapic_phys_id_map = NULL,
+	.setup_apic_routing = NULL,
+	.multi_timer_check = NULL,
+	.apicid_to_node = NULL,
+	.cpu_to_logical_apicid = NULL,
+	.cpu_present_to_apicid = default_cpu_present_to_apicid,
+	.apicid_to_cpu_present = NULL,
+	.setup_portio_remap = NULL,
+	.check_phys_apicid_present = default_check_phys_apicid_present,
+	.enable_apic_mode = NULL,
+	.phys_pkg_id = flat_phys_pkg_id,
+	.mps_oem_check = NULL,
+
+	.get_apic_id = flat_get_apic_id,
+	.set_apic_id = set_apic_id,
+	.apic_id_mask = 0xFFu << 24,
+
+	.cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
+	.cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
+
+	.send_IPI_mask = physflat_send_IPI_mask,
+	.send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
+	.send_IPI_allbutself = physflat_send_IPI_allbutself,
+	.send_IPI_all = physflat_send_IPI_all,
+	.send_IPI_self = apic_send_IPI_self,
+
+	.wakeup_cpu = NULL,
+	.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
+	.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+	.wait_for_init_deassert = NULL,
+	.smp_callin_clear_local_apic = NULL,
+	.store_NMI_vector = NULL,
+	.inquire_remote_apic = NULL,
 };
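
The rewritten initializers above spell out every callback slot of struct genapic because later hunks in this same patch (the io_apic.c changes below) stop using the compile-time mach_apic.h/mach_ipi.h macros and go through the shared apic driver pointer instead. Roughly, the calling pattern those hunks switch to looks like the sketch below; pick_dest_apicid() and kick_cpus() are made-up wrapper names, not functions from the patch.

	#include <linux/cpumask.h>
	#include <asm/genapic.h>

	/* Sketch only: how core code consults the installed genapic driver. */
	static unsigned int pick_dest_apicid(const struct cpumask *mask)
	{
		/* Ask the driver which CPUs may be targeted at all ... */
		const struct cpumask *allowed = apic->target_cpus();

		/* ... then let it fold the intersection down to one APIC ID. */
		return apic->cpu_mask_to_apicid_and(mask, allowed);
	}

	static void kick_cpus(const struct cpumask *mask, int vector)
	{
		/* IPIs likewise go through the driver's send_IPI_* callbacks. */
		apic->send_IPI_mask(mask, vector);
	}
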
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index 6ce497cc372d..7c87156b6411 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -36,8 +36,8 @@ static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
36 cpumask_set_cpu(cpu, retmask); 36 cpumask_set_cpu(cpu, retmask);
37} 37}
38 38
39static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, 39static void
40 unsigned int dest) 40 __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
41{ 41{
42 unsigned long cfg; 42 unsigned long cfg;
43 43
@@ -57,45 +57,50 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
57 */ 57 */
58static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) 58static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
59{ 59{
60 unsigned long flags;
61 unsigned long query_cpu; 60 unsigned long query_cpu;
61 unsigned long flags;
62 62
63 local_irq_save(flags); 63 local_irq_save(flags);
64 for_each_cpu(query_cpu, mask) 64 for_each_cpu(query_cpu, mask) {
65 __x2apic_send_IPI_dest( 65 __x2apic_send_IPI_dest(
66 per_cpu(x86_cpu_to_logical_apicid, query_cpu), 66 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
67 vector, APIC_DEST_LOGICAL); 67 vector, apic->dest_logical);
68 }
68 local_irq_restore(flags); 69 local_irq_restore(flags);
69} 70}
70 71
71static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, 72static void
72 int vector) 73 x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
73{ 74{
74 unsigned long flags;
75 unsigned long query_cpu;
76 unsigned long this_cpu = smp_processor_id(); 75 unsigned long this_cpu = smp_processor_id();
76 unsigned long query_cpu;
77 unsigned long flags;
77 78
78 local_irq_save(flags); 79 local_irq_save(flags);
79 for_each_cpu(query_cpu, mask) 80 for_each_cpu(query_cpu, mask) {
80 if (query_cpu != this_cpu) 81 if (query_cpu == this_cpu)
81 __x2apic_send_IPI_dest( 82 continue;
83 __x2apic_send_IPI_dest(
82 per_cpu(x86_cpu_to_logical_apicid, query_cpu), 84 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
83 vector, APIC_DEST_LOGICAL); 85 vector, apic->dest_logical);
86 }
84 local_irq_restore(flags); 87 local_irq_restore(flags);
85} 88}
86 89
87static void x2apic_send_IPI_allbutself(int vector) 90static void x2apic_send_IPI_allbutself(int vector)
88{ 91{
89 unsigned long flags;
90 unsigned long query_cpu;
91 unsigned long this_cpu = smp_processor_id(); 92 unsigned long this_cpu = smp_processor_id();
93 unsigned long query_cpu;
94 unsigned long flags;
92 95
93 local_irq_save(flags); 96 local_irq_save(flags);
94 for_each_online_cpu(query_cpu) 97 for_each_online_cpu(query_cpu) {
95 if (query_cpu != this_cpu) 98 if (query_cpu == this_cpu)
96 __x2apic_send_IPI_dest( 99 continue;
100 __x2apic_send_IPI_dest(
97 per_cpu(x86_cpu_to_logical_apicid, query_cpu), 101 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
98 vector, APIC_DEST_LOGICAL); 102 vector, apic->dest_logical);
103 }
99 local_irq_restore(flags); 104 local_irq_restore(flags);
100} 105}
101 106
@@ -111,21 +116,21 @@ static int x2apic_apic_id_registered(void)
111 116
112static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) 117static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
113{ 118{
114 int cpu;
115
116 /* 119 /*
117 * We're using fixed IRQ delivery, can only return one logical APIC ID. 120 * We're using fixed IRQ delivery, can only return one logical APIC ID.
118 * May as well be the first. 121 * May as well be the first.
119 */ 122 */
120 cpu = cpumask_first(cpumask); 123 int cpu = cpumask_first(cpumask);
124
121 if ((unsigned)cpu < nr_cpu_ids) 125 if ((unsigned)cpu < nr_cpu_ids)
122 return per_cpu(x86_cpu_to_logical_apicid, cpu); 126 return per_cpu(x86_cpu_to_logical_apicid, cpu);
123 else 127 else
124 return BAD_APICID; 128 return BAD_APICID;
125} 129}
126 130
127static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 131static unsigned int
128 const struct cpumask *andmask) 132x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
133 const struct cpumask *andmask)
129{ 134{
130 int cpu; 135 int cpu;
131 136
@@ -133,15 +138,18 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
133 * We're using fixed IRQ delivery, can only return one logical APIC ID. 138 * We're using fixed IRQ delivery, can only return one logical APIC ID.
134 * May as well be the first. 139 * May as well be the first.
135 */ 140 */
136 for_each_cpu_and(cpu, cpumask, andmask) 141 for_each_cpu_and(cpu, cpumask, andmask) {
137 if (cpumask_test_cpu(cpu, cpu_online_mask)) 142 if (cpumask_test_cpu(cpu, cpu_online_mask))
138 break; 143 break;
144 }
145
139 if (cpu < nr_cpu_ids) 146 if (cpu < nr_cpu_ids)
140 return per_cpu(x86_cpu_to_logical_apicid, cpu); 147 return per_cpu(x86_cpu_to_logical_apicid, cpu);
148
141 return BAD_APICID; 149 return BAD_APICID;
142} 150}
143 151
144static unsigned int get_apic_id(unsigned long x) 152static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
145{ 153{
146 unsigned int id; 154 unsigned int id;
147 155
@@ -157,7 +165,7 @@ static unsigned long set_apic_id(unsigned int id)
157 return x; 165 return x;
158} 166}
159 167
160static unsigned int phys_pkg_id(int index_msb) 168static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
161{ 169{
162 return current_cpu_data.initial_apicid >> index_msb; 170 return current_cpu_data.initial_apicid >> index_msb;
163} 171}
@@ -172,27 +180,58 @@ static void init_x2apic_ldr(void)
172 int cpu = smp_processor_id(); 180 int cpu = smp_processor_id();
173 181
174 per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR); 182 per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
175 return;
176} 183}
177 184
 struct genapic apic_x2apic_cluster = {
-	.name = "cluster x2apic",
-	.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
-	.int_delivery_mode = dest_LowestPrio,
-	.int_dest_mode = (APIC_DEST_LOGICAL != 0),
-	.target_cpus = x2apic_target_cpus,
-	.vector_allocation_domain = x2apic_vector_allocation_domain,
-	.apic_id_registered = x2apic_apic_id_registered,
-	.init_apic_ldr = init_x2apic_ldr,
-	.send_IPI_all = x2apic_send_IPI_all,
-	.send_IPI_allbutself = x2apic_send_IPI_allbutself,
-	.send_IPI_mask = x2apic_send_IPI_mask,
-	.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
-	.send_IPI_self = x2apic_send_IPI_self,
-	.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
-	.phys_pkg_id = phys_pkg_id,
-	.get_apic_id = get_apic_id,
-	.set_apic_id = set_apic_id,
-	.apic_id_mask = (0xFFFFFFFFu),
+
+	.name = "cluster x2apic",
+	.probe = NULL,
+	.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
+	.apic_id_registered = x2apic_apic_id_registered,
+
+	.irq_delivery_mode = dest_LowestPrio,
+	.irq_dest_mode = 1, /* logical */
+
+	.target_cpus = x2apic_target_cpus,
+	.disable_esr = 0,
+	.dest_logical = APIC_DEST_LOGICAL,
+	.check_apicid_used = NULL,
+	.check_apicid_present = NULL,
+
+	.vector_allocation_domain = x2apic_vector_allocation_domain,
+	.init_apic_ldr = init_x2apic_ldr,
+
+	.ioapic_phys_id_map = NULL,
+	.setup_apic_routing = NULL,
+	.multi_timer_check = NULL,
+	.apicid_to_node = NULL,
+	.cpu_to_logical_apicid = NULL,
+	.cpu_present_to_apicid = default_cpu_present_to_apicid,
+	.apicid_to_cpu_present = NULL,
+	.setup_portio_remap = NULL,
+	.check_phys_apicid_present = default_check_phys_apicid_present,
+	.enable_apic_mode = NULL,
+	.phys_pkg_id = x2apic_cluster_phys_pkg_id,
+	.mps_oem_check = NULL,
+
+	.get_apic_id = x2apic_cluster_phys_get_apic_id,
+	.set_apic_id = set_apic_id,
+	.apic_id_mask = 0xFFFFFFFFu,
+
+	.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
+	.cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
+
+	.send_IPI_mask = x2apic_send_IPI_mask,
+	.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
+	.send_IPI_allbutself = x2apic_send_IPI_allbutself,
+	.send_IPI_all = x2apic_send_IPI_all,
+	.send_IPI_self = x2apic_send_IPI_self,
+
+	.wakeup_cpu = NULL,
+	.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
+	.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+	.wait_for_init_deassert = NULL,
+	.smp_callin_clear_local_apic = NULL,
+	.store_NMI_vector = NULL,
+	.inquire_remote_apic = NULL,
 };
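
Both cpu_mask_to_apicid_and() variants in this file implement the same policy: with fixed IRQ delivery only one destination can be programmed, so the first CPU that is in both masks and online wins, and BAD_APICID is returned if there is none. Stripped of the per-cpu APIC-ID lookup, the selection step is just the following standalone sketch (not code from the patch):

	#include <linux/cpumask.h>

	/*
	 * Sketch of the selection policy used by the cpu_mask_to_apicid_and()
	 * helpers above: return the first online CPU present in both masks,
	 * or -1 if none qualifies. The real code then maps the chosen CPU to
	 * its APIC ID via the per-cpu array, or returns BAD_APICID.
	 */
	static int first_online_cpu_and(const struct cpumask *cpumask,
					const struct cpumask *andmask)
	{
		int cpu;

		for_each_cpu_and(cpu, cpumask, andmask) {
			if (cpumask_test_cpu(cpu, cpu_online_mask))
				return cpu;
		}

		return -1;
	}
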
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c
index 21bcc0e098ba..5cbae8aa0408 100644
--- a/arch/x86/kernel/genx2apic_phys.c
+++ b/arch/x86/kernel/genx2apic_phys.c
@@ -55,8 +55,8 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
55 55
56static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) 56static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
57{ 57{
58 unsigned long flags;
59 unsigned long query_cpu; 58 unsigned long query_cpu;
59 unsigned long flags;
60 60
61 local_irq_save(flags); 61 local_irq_save(flags);
62 for_each_cpu(query_cpu, mask) { 62 for_each_cpu(query_cpu, mask) {
@@ -66,12 +66,12 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
66 local_irq_restore(flags); 66 local_irq_restore(flags);
67} 67}
68 68
69static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, 69static void
70 int vector) 70 x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
71{ 71{
72 unsigned long flags;
73 unsigned long query_cpu;
74 unsigned long this_cpu = smp_processor_id(); 72 unsigned long this_cpu = smp_processor_id();
73 unsigned long query_cpu;
74 unsigned long flags;
75 75
76 local_irq_save(flags); 76 local_irq_save(flags);
77 for_each_cpu(query_cpu, mask) { 77 for_each_cpu(query_cpu, mask) {
@@ -85,16 +85,17 @@ static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
85 85
86static void x2apic_send_IPI_allbutself(int vector) 86static void x2apic_send_IPI_allbutself(int vector)
87{ 87{
88 unsigned long flags;
89 unsigned long query_cpu;
90 unsigned long this_cpu = smp_processor_id(); 88 unsigned long this_cpu = smp_processor_id();
89 unsigned long query_cpu;
90 unsigned long flags;
91 91
92 local_irq_save(flags); 92 local_irq_save(flags);
93 for_each_online_cpu(query_cpu) 93 for_each_online_cpu(query_cpu) {
94 if (query_cpu != this_cpu) 94 if (query_cpu == this_cpu)
95 __x2apic_send_IPI_dest( 95 continue;
96 per_cpu(x86_cpu_to_apicid, query_cpu), 96 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
97 vector, APIC_DEST_PHYSICAL); 97 vector, APIC_DEST_PHYSICAL);
98 }
98 local_irq_restore(flags); 99 local_irq_restore(flags);
99} 100}
100 101
@@ -110,21 +111,21 @@ static int x2apic_apic_id_registered(void)
110 111
111static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) 112static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
112{ 113{
113 int cpu;
114
115 /* 114 /*
116 * We're using fixed IRQ delivery, can only return one phys APIC ID. 115 * We're using fixed IRQ delivery, can only return one phys APIC ID.
117 * May as well be the first. 116 * May as well be the first.
118 */ 117 */
119 cpu = cpumask_first(cpumask); 118 int cpu = cpumask_first(cpumask);
119
120 if ((unsigned)cpu < nr_cpu_ids) 120 if ((unsigned)cpu < nr_cpu_ids)
121 return per_cpu(x86_cpu_to_apicid, cpu); 121 return per_cpu(x86_cpu_to_apicid, cpu);
122 else 122 else
123 return BAD_APICID; 123 return BAD_APICID;
124} 124}
125 125
126static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 126static unsigned int
127 const struct cpumask *andmask) 127x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
128 const struct cpumask *andmask)
128{ 129{
129 int cpu; 130 int cpu;
130 131
@@ -132,31 +133,28 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
132 * We're using fixed IRQ delivery, can only return one phys APIC ID. 133 * We're using fixed IRQ delivery, can only return one phys APIC ID.
133 * May as well be the first. 134 * May as well be the first.
134 */ 135 */
135 for_each_cpu_and(cpu, cpumask, andmask) 136 for_each_cpu_and(cpu, cpumask, andmask) {
136 if (cpumask_test_cpu(cpu, cpu_online_mask)) 137 if (cpumask_test_cpu(cpu, cpu_online_mask))
137 break; 138 break;
139 }
140
138 if (cpu < nr_cpu_ids) 141 if (cpu < nr_cpu_ids)
139 return per_cpu(x86_cpu_to_apicid, cpu); 142 return per_cpu(x86_cpu_to_apicid, cpu);
143
140 return BAD_APICID; 144 return BAD_APICID;
141} 145}
142 146
143static unsigned int get_apic_id(unsigned long x) 147static unsigned int x2apic_phys_get_apic_id(unsigned long x)
144{ 148{
145 unsigned int id; 149 return x;
146
147 id = x;
148 return id;
149} 150}
150 151
151static unsigned long set_apic_id(unsigned int id) 152static unsigned long set_apic_id(unsigned int id)
152{ 153{
153 unsigned long x; 154 return id;
154
155 x = id;
156 return x;
157} 155}
158 156
159static unsigned int phys_pkg_id(int index_msb) 157static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
160{ 158{
161 return current_cpu_data.initial_apicid >> index_msb; 159 return current_cpu_data.initial_apicid >> index_msb;
162} 160}
@@ -168,27 +166,58 @@ static void x2apic_send_IPI_self(int vector)
168 166
169static void init_x2apic_ldr(void) 167static void init_x2apic_ldr(void)
170{ 168{
171 return;
172} 169}
173 170
 struct genapic apic_x2apic_phys = {
-	.name = "physical x2apic",
-	.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
-	.int_delivery_mode = dest_Fixed,
-	.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
-	.target_cpus = x2apic_target_cpus,
-	.vector_allocation_domain = x2apic_vector_allocation_domain,
-	.apic_id_registered = x2apic_apic_id_registered,
-	.init_apic_ldr = init_x2apic_ldr,
-	.send_IPI_all = x2apic_send_IPI_all,
-	.send_IPI_allbutself = x2apic_send_IPI_allbutself,
-	.send_IPI_mask = x2apic_send_IPI_mask,
-	.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
-	.send_IPI_self = x2apic_send_IPI_self,
-	.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
-	.phys_pkg_id = phys_pkg_id,
-	.get_apic_id = get_apic_id,
-	.set_apic_id = set_apic_id,
-	.apic_id_mask = (0xFFFFFFFFu),
+
+	.name = "physical x2apic",
+	.probe = NULL,
+	.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
+	.apic_id_registered = x2apic_apic_id_registered,
+
+	.irq_delivery_mode = dest_Fixed,
+	.irq_dest_mode = 0, /* physical */
+
+	.target_cpus = x2apic_target_cpus,
+	.disable_esr = 0,
+	.dest_logical = 0,
+	.check_apicid_used = NULL,
+	.check_apicid_present = NULL,
+
+	.vector_allocation_domain = x2apic_vector_allocation_domain,
+	.init_apic_ldr = init_x2apic_ldr,
+
+	.ioapic_phys_id_map = NULL,
+	.setup_apic_routing = NULL,
+	.multi_timer_check = NULL,
+	.apicid_to_node = NULL,
+	.cpu_to_logical_apicid = NULL,
+	.cpu_present_to_apicid = default_cpu_present_to_apicid,
+	.apicid_to_cpu_present = NULL,
+	.setup_portio_remap = NULL,
+	.check_phys_apicid_present = default_check_phys_apicid_present,
+	.enable_apic_mode = NULL,
+	.phys_pkg_id = x2apic_phys_pkg_id,
+	.mps_oem_check = NULL,
+
+	.get_apic_id = x2apic_phys_get_apic_id,
+	.set_apic_id = set_apic_id,
+	.apic_id_mask = 0xFFFFFFFFu,
+
+	.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
+	.cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
+
+	.send_IPI_mask = x2apic_send_IPI_mask,
+	.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
+	.send_IPI_allbutself = x2apic_send_IPI_allbutself,
+	.send_IPI_all = x2apic_send_IPI_all,
+	.send_IPI_self = x2apic_send_IPI_self,
+
+	.wakeup_cpu = NULL,
+	.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
+	.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+	.wait_for_init_deassert = NULL,
+	.smp_callin_clear_local_apic = NULL,
+	.store_NMI_vector = NULL,
+	.inquire_remote_apic = NULL,
 };
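
The send_IPI_mask_allbutself()/send_IPI_allbutself() pairs in both x2apic drivers share one shape: read smp_processor_id() once, disable interrupts around the walk, and skip the sending CPU. A minimal sketch of that shape follows; send_one_ipi() is a stand-in for the real __x2apic_send_IPI_dest() primitive, not a function from the patch.

	#include <linux/cpumask.h>
	#include <linux/irqflags.h>
	#include <linux/smp.h>

	/* Stand-in for __x2apic_send_IPI_dest(); the real one programs the ICR. */
	static void send_one_ipi(unsigned long cpu, int vector)
	{
		(void)cpu;
		(void)vector;
	}

	/* Sketch of the "all but self" IPI loop used by the drivers above. */
	static void ipi_mask_allbutself(const struct cpumask *mask, int vector)
	{
		unsigned long this_cpu = smp_processor_id();
		unsigned long query_cpu;
		unsigned long flags;

		local_irq_save(flags);
		for_each_cpu(query_cpu, mask) {
			if (query_cpu == this_cpu)
				continue;	/* never IPI ourselves */
			send_one_ipi(query_cpu, vector);
		}
		local_irq_restore(flags);
	}
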
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index b193e082f6ce..6adb5e6f4d92 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -25,6 +25,7 @@
25#include <asm/ipi.h> 25#include <asm/ipi.h>
26#include <asm/genapic.h> 26#include <asm/genapic.h>
27#include <asm/pgtable.h> 27#include <asm/pgtable.h>
28#include <asm/uv/uv.h>
28#include <asm/uv/uv_mmrs.h> 29#include <asm/uv/uv_mmrs.h>
29#include <asm/uv/uv_hub.h> 30#include <asm/uv/uv_hub.h>
30#include <asm/uv/bios.h> 31#include <asm/uv/bios.h>
@@ -117,12 +118,13 @@ static void uv_send_IPI_one(int cpu, int vector)
117 int pnode; 118 int pnode;
118 119
119 apicid = per_cpu(x86_cpu_to_apicid, cpu); 120 apicid = per_cpu(x86_cpu_to_apicid, cpu);
120 lapicid = apicid & 0x3f; /* ZZZ macro needed */ 121 lapicid = apicid & 0x3f; /* ZZZ macro needed */
121 pnode = uv_apicid_to_pnode(apicid); 122 pnode = uv_apicid_to_pnode(apicid);
122 val = 123
123 (1UL << UVH_IPI_INT_SEND_SHFT) | (lapicid << 124 val = ( 1UL << UVH_IPI_INT_SEND_SHFT ) |
124 UVH_IPI_INT_APIC_ID_SHFT) | 125 ( lapicid << UVH_IPI_INT_APIC_ID_SHFT ) |
125 (vector << UVH_IPI_INT_VECTOR_SHFT); 126 ( vector << UVH_IPI_INT_VECTOR_SHFT );
127
126 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 128 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
127} 129}
128 130
@@ -136,22 +138,24 @@ static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
136 138
137static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) 139static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
138{ 140{
139 unsigned int cpu;
140 unsigned int this_cpu = smp_processor_id(); 141 unsigned int this_cpu = smp_processor_id();
142 unsigned int cpu;
141 143
142 for_each_cpu(cpu, mask) 144 for_each_cpu(cpu, mask) {
143 if (cpu != this_cpu) 145 if (cpu != this_cpu)
144 uv_send_IPI_one(cpu, vector); 146 uv_send_IPI_one(cpu, vector);
147 }
145} 148}
146 149
147static void uv_send_IPI_allbutself(int vector) 150static void uv_send_IPI_allbutself(int vector)
148{ 151{
149 unsigned int cpu;
150 unsigned int this_cpu = smp_processor_id(); 152 unsigned int this_cpu = smp_processor_id();
153 unsigned int cpu;
151 154
152 for_each_online_cpu(cpu) 155 for_each_online_cpu(cpu) {
153 if (cpu != this_cpu) 156 if (cpu != this_cpu)
154 uv_send_IPI_one(cpu, vector); 157 uv_send_IPI_one(cpu, vector);
158 }
155} 159}
156 160
157static void uv_send_IPI_all(int vector) 161static void uv_send_IPI_all(int vector)
@@ -170,21 +174,21 @@ static void uv_init_apic_ldr(void)
170 174
171static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask) 175static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
172{ 176{
173 int cpu;
174
175 /* 177 /*
176 * We're using fixed IRQ delivery, can only return one phys APIC ID. 178 * We're using fixed IRQ delivery, can only return one phys APIC ID.
177 * May as well be the first. 179 * May as well be the first.
178 */ 180 */
179 cpu = cpumask_first(cpumask); 181 int cpu = cpumask_first(cpumask);
182
180 if ((unsigned)cpu < nr_cpu_ids) 183 if ((unsigned)cpu < nr_cpu_ids)
181 return per_cpu(x86_cpu_to_apicid, cpu); 184 return per_cpu(x86_cpu_to_apicid, cpu);
182 else 185 else
183 return BAD_APICID; 186 return BAD_APICID;
184} 187}
185 188
186static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 189static unsigned int
187 const struct cpumask *andmask) 190uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
191 const struct cpumask *andmask)
188{ 192{
189 int cpu; 193 int cpu;
190 194
@@ -192,15 +196,17 @@ static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
192 * We're using fixed IRQ delivery, can only return one phys APIC ID. 196 * We're using fixed IRQ delivery, can only return one phys APIC ID.
193 * May as well be the first. 197 * May as well be the first.
194 */ 198 */
195 for_each_cpu_and(cpu, cpumask, andmask) 199 for_each_cpu_and(cpu, cpumask, andmask) {
196 if (cpumask_test_cpu(cpu, cpu_online_mask)) 200 if (cpumask_test_cpu(cpu, cpu_online_mask))
197 break; 201 break;
202 }
198 if (cpu < nr_cpu_ids) 203 if (cpu < nr_cpu_ids)
199 return per_cpu(x86_cpu_to_apicid, cpu); 204 return per_cpu(x86_cpu_to_apicid, cpu);
205
200 return BAD_APICID; 206 return BAD_APICID;
201} 207}
202 208
203static unsigned int get_apic_id(unsigned long x) 209static unsigned int x2apic_get_apic_id(unsigned long x)
204{ 210{
205 unsigned int id; 211 unsigned int id;
206 212
@@ -222,10 +228,10 @@ static unsigned long set_apic_id(unsigned int id)
222static unsigned int uv_read_apic_id(void) 228static unsigned int uv_read_apic_id(void)
223{ 229{
224 230
225 return get_apic_id(apic_read(APIC_ID)); 231 return x2apic_get_apic_id(apic_read(APIC_ID));
226} 232}
227 233
228static unsigned int phys_pkg_id(int index_msb) 234static int uv_phys_pkg_id(int initial_apicid, int index_msb)
229{ 235{
230 return uv_read_apic_id() >> index_msb; 236 return uv_read_apic_id() >> index_msb;
231} 237}
@@ -236,25 +242,57 @@ static void uv_send_IPI_self(int vector)
236} 242}
237 243
 struct genapic apic_x2apic_uv_x = {
-	.name = "UV large system",
-	.acpi_madt_oem_check = uv_acpi_madt_oem_check,
-	.int_delivery_mode = dest_Fixed,
-	.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
-	.target_cpus = uv_target_cpus,
-	.vector_allocation_domain = uv_vector_allocation_domain,
-	.apic_id_registered = uv_apic_id_registered,
-	.init_apic_ldr = uv_init_apic_ldr,
-	.send_IPI_all = uv_send_IPI_all,
-	.send_IPI_allbutself = uv_send_IPI_allbutself,
-	.send_IPI_mask = uv_send_IPI_mask,
-	.send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
-	.send_IPI_self = uv_send_IPI_self,
-	.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
-	.phys_pkg_id = phys_pkg_id,
-	.get_apic_id = get_apic_id,
-	.set_apic_id = set_apic_id,
-	.apic_id_mask = (0xFFFFFFFFu),
+
+	.name = "UV large system",
+	.probe = NULL,
+	.acpi_madt_oem_check = uv_acpi_madt_oem_check,
+	.apic_id_registered = uv_apic_id_registered,
+
+	.irq_delivery_mode = dest_Fixed,
+	.irq_dest_mode = 1, /* logical */
+
+	.target_cpus = uv_target_cpus,
+	.disable_esr = 0,
+	.dest_logical = APIC_DEST_LOGICAL,
+	.check_apicid_used = NULL,
+	.check_apicid_present = NULL,
+
+	.vector_allocation_domain = uv_vector_allocation_domain,
+	.init_apic_ldr = uv_init_apic_ldr,
+
+	.ioapic_phys_id_map = NULL,
+	.setup_apic_routing = NULL,
+	.multi_timer_check = NULL,
+	.apicid_to_node = NULL,
+	.cpu_to_logical_apicid = NULL,
+	.cpu_present_to_apicid = default_cpu_present_to_apicid,
+	.apicid_to_cpu_present = NULL,
+	.setup_portio_remap = NULL,
+	.check_phys_apicid_present = default_check_phys_apicid_present,
+	.enable_apic_mode = NULL,
+	.phys_pkg_id = uv_phys_pkg_id,
+	.mps_oem_check = NULL,
+
+	.get_apic_id = x2apic_get_apic_id,
+	.set_apic_id = set_apic_id,
+	.apic_id_mask = 0xFFFFFFFFu,
+
+	.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
+	.cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
+
+	.send_IPI_mask = uv_send_IPI_mask,
+	.send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
+	.send_IPI_allbutself = uv_send_IPI_allbutself,
+	.send_IPI_all = uv_send_IPI_all,
+	.send_IPI_self = uv_send_IPI_self,
+
+	.wakeup_cpu = NULL,
+	.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
+	.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+	.wait_for_init_deassert = NULL,
+	.smp_callin_clear_local_apic = NULL,
+	.store_NMI_vector = NULL,
+	.inquire_remote_apic = NULL,
 };
259 297
260static __cpuinit void set_x2apic_extra_bits(int pnode) 298static __cpuinit void set_x2apic_extra_bits(int pnode)
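
uv_send_IPI_one() in the hunks above builds a single 64-bit value for the UVH_IPI_INT MMR out of the send bit, the (truncated) local APIC ID and the vector. Factored out as a pure helper, the packing looks roughly like this sketch; the UVH_IPI_INT_*_SHFT constants come from asm/uv/uv_mmrs.h, and the 0x3f mask mirrors the "ZZZ macro needed" truncation in the patch.

	#include <linux/types.h>
	#include <asm/uv/uv_mmrs.h>

	/* Sketch: compute the UVH_IPI_INT value the way uv_send_IPI_one() does. */
	static u64 uv_ipi_int_value(int apicid, int vector)
	{
		u64 lapicid = apicid & 0x3f;	/* ZZZ macro needed */

		return (1UL << UVH_IPI_INT_SEND_SHFT)		|
		       (lapicid << UVH_IPI_INT_APIC_ID_SHFT)	|
		       ((u64)vector << UVH_IPI_INT_VECTOR_SHFT);
	}
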
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index b9a4d8c4b935..f5b272247690 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -26,27 +26,6 @@
26#include <asm/bios_ebda.h> 26#include <asm/bios_ebda.h>
27#include <asm/trampoline.h> 27#include <asm/trampoline.h>
28 28
29/* boot cpu pda */
30static struct x8664_pda _boot_cpu_pda;
31
32#ifdef CONFIG_SMP
33/*
34 * We install an empty cpu_pda pointer table to indicate to early users
35 * (numa_set_node) that the cpu_pda pointer table for cpus other than
36 * the boot cpu is not yet setup.
37 */
38static struct x8664_pda *__cpu_pda[NR_CPUS] __initdata;
39#else
40static struct x8664_pda *__cpu_pda[NR_CPUS] __read_mostly;
41#endif
42
43void __init x86_64_init_pda(void)
44{
45 _cpu_pda = __cpu_pda;
46 cpu_pda(0) = &_boot_cpu_pda;
47 pda_init(0);
48}
49
50static void __init zap_identity_mappings(void) 29static void __init zap_identity_mappings(void)
51{ 30{
52 pgd_t *pgd = pgd_offset_k(0UL); 31 pgd_t *pgd = pgd_offset_k(0UL);
@@ -112,8 +91,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
112 if (console_loglevel == 10) 91 if (console_loglevel == 10)
113 early_printk("Kernel alive\n"); 92 early_printk("Kernel alive\n");
114 93
115 x86_64_init_pda();
116
117 x86_64_start_reservations(real_mode_data); 94 x86_64_start_reservations(real_mode_data);
118} 95}
119 96
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index e835b4eea70b..722464c520cf 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -429,12 +429,14 @@ is386: movl $2,%ecx # set MP
429 ljmp $(__KERNEL_CS),$1f 429 ljmp $(__KERNEL_CS),$1f
4301: movl $(__KERNEL_DS),%eax # reload all the segment registers 4301: movl $(__KERNEL_DS),%eax # reload all the segment registers
431 movl %eax,%ss # after changing gdt. 431 movl %eax,%ss # after changing gdt.
432 movl %eax,%fs # gets reset once there's real percpu
433 432
434 movl $(__USER_DS),%eax # DS/ES contains default USER segment 433 movl $(__USER_DS),%eax # DS/ES contains default USER segment
435 movl %eax,%ds 434 movl %eax,%ds
436 movl %eax,%es 435 movl %eax,%es
437 436
437 movl $(__KERNEL_PERCPU), %eax
438 movl %eax,%fs # set this cpu's percpu
439
438 xorl %eax,%eax # Clear GS and LDT 440 xorl %eax,%eax # Clear GS and LDT
439 movl %eax,%gs 441 movl %eax,%gs
440 lldt %ax 442 lldt %ax
@@ -446,8 +448,6 @@ is386: movl $2,%ecx # set MP
446 movb $1, ready 448 movb $1, ready
447 cmpb $0,%cl # the first CPU calls start_kernel 449 cmpb $0,%cl # the first CPU calls start_kernel
448 je 1f 450 je 1f
449 movl $(__KERNEL_PERCPU), %eax
450 movl %eax,%fs # set this cpu's percpu
451 movl (stack_start), %esp 451 movl (stack_start), %esp
4521: 4521:
453#endif /* CONFIG_SMP */ 453#endif /* CONFIG_SMP */
@@ -548,12 +548,8 @@ early_fault:
548 pushl %eax 548 pushl %eax
549 pushl %edx /* trapno */ 549 pushl %edx /* trapno */
550 pushl $fault_msg 550 pushl $fault_msg
551#ifdef CONFIG_EARLY_PRINTK
552 call early_printk
553#else
554 call printk 551 call printk
555#endif 552#endif
556#endif
557 call dump_stack 553 call dump_stack
558hlt_loop: 554hlt_loop:
559 hlt 555 hlt
@@ -580,11 +576,10 @@ ignore_int:
580 pushl 32(%esp) 576 pushl 32(%esp)
581 pushl 40(%esp) 577 pushl 40(%esp)
582 pushl $int_msg 578 pushl $int_msg
583#ifdef CONFIG_EARLY_PRINTK
584 call early_printk
585#else
586 call printk 579 call printk
587#endif 580
581 call dump_stack
582
588 addl $(5*4),%esp 583 addl $(5*4),%esp
589 popl %ds 584 popl %ds
590 popl %es 585 popl %es
@@ -660,7 +655,7 @@ early_recursion_flag:
660 .long 0 655 .long 0
661 656
662int_msg: 657int_msg:
663 .asciz "Unknown interrupt or fault at EIP %p %p %p\n" 658 .asciz "Unknown interrupt or fault at: %p %p %p\n"
664 659
665fault_msg: 660fault_msg:
666/* fault info: */ 661/* fault info: */
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 0e275d495563..a0a2b5ca9b7d 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -19,6 +19,7 @@
19#include <asm/msr.h> 19#include <asm/msr.h>
20#include <asm/cache.h> 20#include <asm/cache.h>
21#include <asm/processor-flags.h> 21#include <asm/processor-flags.h>
22#include <asm/percpu.h>
22 23
23#ifdef CONFIG_PARAVIRT 24#ifdef CONFIG_PARAVIRT
24#include <asm/asm-offsets.h> 25#include <asm/asm-offsets.h>
@@ -204,6 +205,19 @@ ENTRY(secondary_startup_64)
204 pushq $0 205 pushq $0
205 popfq 206 popfq
206 207
208#ifdef CONFIG_SMP
209 /*
210 * Fix up static pointers that need __per_cpu_load added. The assembler
211 * is unable to do this directly. This is only needed for the boot cpu.
212 * These values are set up with the correct base addresses by C code for
213 * secondary cpus.
214 */
215 movq initial_gs(%rip), %rax
216 cmpl $0, per_cpu__cpu_number(%rax)
217 jne 1f
218 addq %rax, early_gdt_descr_base(%rip)
2191:
220#endif
207 /* 221 /*
208 * We must switch to a new descriptor in kernel space for the GDT 222 * We must switch to a new descriptor in kernel space for the GDT
209 * because soon the kernel won't have access anymore to the userspace 223 * because soon the kernel won't have access anymore to the userspace
@@ -226,12 +240,15 @@ ENTRY(secondary_startup_64)
226 movl %eax,%fs 240 movl %eax,%fs
227 movl %eax,%gs 241 movl %eax,%gs
228 242
229 /* 243 /* Set up %gs.
230 * Setup up a dummy PDA. this is just for some early bootup code 244 *
231 * that does in_interrupt() 245 * The base of %gs always points to the bottom of the irqstack
232 */ 246 * union. If the stack protector canary is enabled, it is
247 * located at %gs:40. Note that, on SMP, the boot cpu uses
248 * init data section till per cpu areas are set up.
249 */
233 movl $MSR_GS_BASE,%ecx 250 movl $MSR_GS_BASE,%ecx
234 movq $empty_zero_page,%rax 251 movq initial_gs(%rip),%rax
235 movq %rax,%rdx 252 movq %rax,%rdx
236 shrq $32,%rdx 253 shrq $32,%rdx
237 wrmsr 254 wrmsr
@@ -257,6 +274,12 @@ ENTRY(secondary_startup_64)
257 .align 8 274 .align 8
258 ENTRY(initial_code) 275 ENTRY(initial_code)
259 .quad x86_64_start_kernel 276 .quad x86_64_start_kernel
277 ENTRY(initial_gs)
278#ifdef CONFIG_SMP
279 .quad __per_cpu_load
280#else
281 .quad PER_CPU_VAR(irq_stack_union)
282#endif
260 __FINITDATA 283 __FINITDATA
261 284
262 ENTRY(stack_start) 285 ENTRY(stack_start)
@@ -401,7 +424,8 @@ NEXT_PAGE(level2_spare_pgt)
401 .globl early_gdt_descr 424 .globl early_gdt_descr
402early_gdt_descr: 425early_gdt_descr:
403 .word GDT_ENTRIES*8-1 426 .word GDT_ENTRIES*8-1
404 .quad per_cpu__gdt_page 427early_gdt_descr_base:
428 .quad per_cpu__gdt_page
405 429
406ENTRY(phys_base) 430ENTRY(phys_base)
407 /* This must match the first entry in level2_kernel_pgt */ 431 /* This must match the first entry in level2_kernel_pgt */
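
The secondary_startup_64 and initial_gs hunks above deal with the per-cpu layout change: on SMP the boot CPU loads __per_cpu_load into MSR_GS_BASE, and any static pointer that refers to a per-cpu object (here early_gdt_descr_base) must have that same base added before C code has set up the real per-cpu areas. In C terms the fixup is just an address addition, roughly as sketched below (illustration only; per_cpu__gdt_page is really a struct gdt_page, declared here as a bare symbol for simplicity).

	/* Symbols provided by the linker script / percpu machinery. */
	extern char __per_cpu_load[];		/* load address of the initial percpu image */
	extern char per_cpu__gdt_page[];	/* per-cpu offset of the boot GDT page */

	/*
	 * Sketch of the boot-CPU fixup performed by
	 *	addq %rax, early_gdt_descr_base(%rip)
	 * with %rax == initial_gs == __per_cpu_load: turn a per-cpu offset
	 * into an absolute address inside the initial per-cpu image.
	 */
	static unsigned long boot_cpu_gdt_descr_base(void)
	{
		return (unsigned long)__per_cpu_load +
		       (unsigned long)per_cpu__gdt_page;
	}
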
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index bc7ac4da90d7..7248ca11bdcd 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Intel IO-APIC support for multi-Pentium hosts. 2 * Intel IO-APIC support for multi-Pentium hosts.
3 * 3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo 4 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
5 * 5 *
6 * Many thanks to Stig Venaas for trying out countless experimental 6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently! 7 * patches and reporting/debugging problems patiently!
@@ -46,6 +46,7 @@
46#include <asm/idle.h> 46#include <asm/idle.h>
47#include <asm/io.h> 47#include <asm/io.h>
48#include <asm/smp.h> 48#include <asm/smp.h>
49#include <asm/cpu.h>
49#include <asm/desc.h> 50#include <asm/desc.h>
50#include <asm/proto.h> 51#include <asm/proto.h>
51#include <asm/acpi.h> 52#include <asm/acpi.h>
@@ -61,9 +62,7 @@
61#include <asm/uv/uv_hub.h> 62#include <asm/uv/uv_hub.h>
62#include <asm/uv/uv_irq.h> 63#include <asm/uv/uv_irq.h>
63 64
64#include <mach_ipi.h> 65#include <asm/genapic.h>
65#include <mach_apic.h>
66#include <mach_apicdef.h>
67 66
68#define __apicdebuginit(type) static type __init 67#define __apicdebuginit(type) static type __init
69 68
@@ -82,11 +81,11 @@ static DEFINE_SPINLOCK(vector_lock);
82int nr_ioapic_registers[MAX_IO_APICS]; 81int nr_ioapic_registers[MAX_IO_APICS];
83 82
84/* I/O APIC entries */ 83/* I/O APIC entries */
85struct mp_config_ioapic mp_ioapics[MAX_IO_APICS]; 84struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
86int nr_ioapics; 85int nr_ioapics;
87 86
88/* MP IRQ source entries */ 87/* MP IRQ source entries */
89struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; 88struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
90 89
91/* # of MP IRQ source entries */ 90/* # of MP IRQ source entries */
92int mp_irq_entries; 91int mp_irq_entries;
@@ -99,10 +98,19 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
99 98
100int skip_ioapic_setup; 99int skip_ioapic_setup;
101 100
101void arch_disable_smp_support(void)
102{
103#ifdef CONFIG_PCI
104 noioapicquirk = 1;
105 noioapicreroute = -1;
106#endif
107 skip_ioapic_setup = 1;
108}
109
102static int __init parse_noapic(char *str) 110static int __init parse_noapic(char *str)
103{ 111{
104 /* disable IO-APIC */ 112 /* disable IO-APIC */
105 disable_ioapic_setup(); 113 arch_disable_smp_support();
106 return 0; 114 return 0;
107} 115}
108early_param("noapic", parse_noapic); 116early_param("noapic", parse_noapic);
@@ -356,7 +364,7 @@ set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
356 364
357 if (!cfg->move_in_progress) { 365 if (!cfg->move_in_progress) {
358 /* it means that domain is not changed */ 366 /* it means that domain is not changed */
359 if (!cpumask_intersects(&desc->affinity, mask)) 367 if (!cpumask_intersects(desc->affinity, mask))
360 cfg->move_desc_pending = 1; 368 cfg->move_desc_pending = 1;
361 } 369 }
362} 370}
@@ -386,7 +394,7 @@ struct io_apic {
386static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) 394static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
387{ 395{
388 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx) 396 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
389 + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK); 397 + (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
390} 398}
391 399
392static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) 400static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
@@ -478,7 +486,7 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
478 io_apic_write(apic, 0x10 + 2*pin, eu.w1); 486 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
479} 487}
480 488
481static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 489void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
482{ 490{
483 unsigned long flags; 491 unsigned long flags;
484 spin_lock_irqsave(&ioapic_lock, flags); 492 spin_lock_irqsave(&ioapic_lock, flags);
@@ -513,11 +521,11 @@ static void send_cleanup_vector(struct irq_cfg *cfg)
513 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) 521 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
514 cfg->move_cleanup_count++; 522 cfg->move_cleanup_count++;
515 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) 523 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
516 send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); 524 apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
517 } else { 525 } else {
518 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); 526 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
519 cfg->move_cleanup_count = cpumask_weight(cleanup_mask); 527 cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
520 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); 528 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
521 free_cpumask_var(cleanup_mask); 529 free_cpumask_var(cleanup_mask);
522 } 530 }
523 cfg->move_in_progress = 0; 531 cfg->move_in_progress = 0;
@@ -562,8 +570,9 @@ static int
562assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); 570assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
563 571
564/* 572/*
565 * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid 573 * Either sets desc->affinity to a valid value, and returns
566 * of that, or returns BAD_APICID and leaves desc->affinity untouched. 574 * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
575 * leaves desc->affinity untouched.
567 */ 576 */
568static unsigned int 577static unsigned int
569set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) 578set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
@@ -579,9 +588,10 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
579 if (assign_irq_vector(irq, cfg, mask)) 588 if (assign_irq_vector(irq, cfg, mask))
580 return BAD_APICID; 589 return BAD_APICID;
581 590
582 cpumask_and(&desc->affinity, cfg->domain, mask); 591 cpumask_and(desc->affinity, cfg->domain, mask);
583 set_extra_move_desc(desc, mask); 592 set_extra_move_desc(desc, mask);
584 return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask); 593
594 return apic->cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
585} 595}
586 596
587static void 597static void
@@ -796,23 +806,6 @@ static void clear_IO_APIC (void)
796 clear_IO_APIC_pin(apic, pin); 806 clear_IO_APIC_pin(apic, pin);
797} 807}
798 808
799#if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
800void send_IPI_self(int vector)
801{
802 unsigned int cfg;
803
804 /*
805 * Wait for idle.
806 */
807 apic_wait_icr_idle();
808 cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
809 /*
810 * Send the IPI. The write to APIC_ICR fires this off.
811 */
812 apic_write(APIC_ICR, cfg);
813}
814#endif /* !CONFIG_SMP && CONFIG_X86_32*/
815
816#ifdef CONFIG_X86_32 809#ifdef CONFIG_X86_32
817/* 810/*
818 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to 811 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
@@ -944,10 +937,10 @@ static int find_irq_entry(int apic, int pin, int type)
944 int i; 937 int i;
945 938
946 for (i = 0; i < mp_irq_entries; i++) 939 for (i = 0; i < mp_irq_entries; i++)
947 if (mp_irqs[i].mp_irqtype == type && 940 if (mp_irqs[i].irqtype == type &&
948 (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid || 941 (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
949 mp_irqs[i].mp_dstapic == MP_APIC_ALL) && 942 mp_irqs[i].dstapic == MP_APIC_ALL) &&
950 mp_irqs[i].mp_dstirq == pin) 943 mp_irqs[i].dstirq == pin)
951 return i; 944 return i;
952 945
953 return -1; 946 return -1;
@@ -961,13 +954,13 @@ static int __init find_isa_irq_pin(int irq, int type)
961 int i; 954 int i;
962 955
963 for (i = 0; i < mp_irq_entries; i++) { 956 for (i = 0; i < mp_irq_entries; i++) {
964 int lbus = mp_irqs[i].mp_srcbus; 957 int lbus = mp_irqs[i].srcbus;
965 958
966 if (test_bit(lbus, mp_bus_not_pci) && 959 if (test_bit(lbus, mp_bus_not_pci) &&
967 (mp_irqs[i].mp_irqtype == type) && 960 (mp_irqs[i].irqtype == type) &&
968 (mp_irqs[i].mp_srcbusirq == irq)) 961 (mp_irqs[i].srcbusirq == irq))
969 962
970 return mp_irqs[i].mp_dstirq; 963 return mp_irqs[i].dstirq;
971 } 964 }
972 return -1; 965 return -1;
973} 966}
@@ -977,17 +970,17 @@ static int __init find_isa_irq_apic(int irq, int type)
977 int i; 970 int i;
978 971
979 for (i = 0; i < mp_irq_entries; i++) { 972 for (i = 0; i < mp_irq_entries; i++) {
980 int lbus = mp_irqs[i].mp_srcbus; 973 int lbus = mp_irqs[i].srcbus;
981 974
982 if (test_bit(lbus, mp_bus_not_pci) && 975 if (test_bit(lbus, mp_bus_not_pci) &&
983 (mp_irqs[i].mp_irqtype == type) && 976 (mp_irqs[i].irqtype == type) &&
984 (mp_irqs[i].mp_srcbusirq == irq)) 977 (mp_irqs[i].srcbusirq == irq))
985 break; 978 break;
986 } 979 }
987 if (i < mp_irq_entries) { 980 if (i < mp_irq_entries) {
988 int apic; 981 int apic;
989 for(apic = 0; apic < nr_ioapics; apic++) { 982 for(apic = 0; apic < nr_ioapics; apic++) {
990 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic) 983 if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
991 return apic; 984 return apic;
992 } 985 }
993 } 986 }
@@ -1012,23 +1005,23 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
1012 return -1; 1005 return -1;
1013 } 1006 }
1014 for (i = 0; i < mp_irq_entries; i++) { 1007 for (i = 0; i < mp_irq_entries; i++) {
1015 int lbus = mp_irqs[i].mp_srcbus; 1008 int lbus = mp_irqs[i].srcbus;
1016 1009
1017 for (apic = 0; apic < nr_ioapics; apic++) 1010 for (apic = 0; apic < nr_ioapics; apic++)
1018 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic || 1011 if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
1019 mp_irqs[i].mp_dstapic == MP_APIC_ALL) 1012 mp_irqs[i].dstapic == MP_APIC_ALL)
1020 break; 1013 break;
1021 1014
1022 if (!test_bit(lbus, mp_bus_not_pci) && 1015 if (!test_bit(lbus, mp_bus_not_pci) &&
1023 !mp_irqs[i].mp_irqtype && 1016 !mp_irqs[i].irqtype &&
1024 (bus == lbus) && 1017 (bus == lbus) &&
1025 (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) { 1018 (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
1026 int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq); 1019 int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);
1027 1020
1028 if (!(apic || IO_APIC_IRQ(irq))) 1021 if (!(apic || IO_APIC_IRQ(irq)))
1029 continue; 1022 continue;
1030 1023
1031 if (pin == (mp_irqs[i].mp_srcbusirq & 3)) 1024 if (pin == (mp_irqs[i].srcbusirq & 3))
1032 return irq; 1025 return irq;
1033 /* 1026 /*
1034 * Use the first all-but-pin matching entry as a 1027 * Use the first all-but-pin matching entry as a
@@ -1071,7 +1064,7 @@ static int EISA_ELCR(unsigned int irq)
1071 * EISA conforming in the MP table, that means its trigger type must 1064 * EISA conforming in the MP table, that means its trigger type must
1072 * be read in from the ELCR */ 1065 * be read in from the ELCR */
1073 1066
1074#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq)) 1067#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq))
1075#define default_EISA_polarity(idx) default_ISA_polarity(idx) 1068#define default_EISA_polarity(idx) default_ISA_polarity(idx)
1076 1069
1077/* PCI interrupts are always polarity one level triggered, 1070/* PCI interrupts are always polarity one level triggered,
@@ -1088,13 +1081,13 @@ static int EISA_ELCR(unsigned int irq)
1088 1081
1089static int MPBIOS_polarity(int idx) 1082static int MPBIOS_polarity(int idx)
1090{ 1083{
1091 int bus = mp_irqs[idx].mp_srcbus; 1084 int bus = mp_irqs[idx].srcbus;
1092 int polarity; 1085 int polarity;
1093 1086
1094 /* 1087 /*
1095 * Determine IRQ line polarity (high active or low active): 1088 * Determine IRQ line polarity (high active or low active):
1096 */ 1089 */
1097 switch (mp_irqs[idx].mp_irqflag & 3) 1090 switch (mp_irqs[idx].irqflag & 3)
1098 { 1091 {
1099 case 0: /* conforms, ie. bus-type dependent polarity */ 1092 case 0: /* conforms, ie. bus-type dependent polarity */
1100 if (test_bit(bus, mp_bus_not_pci)) 1093 if (test_bit(bus, mp_bus_not_pci))
@@ -1130,13 +1123,13 @@ static int MPBIOS_polarity(int idx)
1130 1123
1131static int MPBIOS_trigger(int idx) 1124static int MPBIOS_trigger(int idx)
1132{ 1125{
1133 int bus = mp_irqs[idx].mp_srcbus; 1126 int bus = mp_irqs[idx].srcbus;
1134 int trigger; 1127 int trigger;
1135 1128
1136 /* 1129 /*
1137 * Determine IRQ trigger mode (edge or level sensitive): 1130 * Determine IRQ trigger mode (edge or level sensitive):
1138 */ 1131 */
1139 switch ((mp_irqs[idx].mp_irqflag>>2) & 3) 1132 switch ((mp_irqs[idx].irqflag>>2) & 3)
1140 { 1133 {
1141 case 0: /* conforms, ie. bus-type dependent */ 1134 case 0: /* conforms, ie. bus-type dependent */
1142 if (test_bit(bus, mp_bus_not_pci)) 1135 if (test_bit(bus, mp_bus_not_pci))
@@ -1214,16 +1207,16 @@ int (*ioapic_renumber_irq)(int ioapic, int irq);
1214static int pin_2_irq(int idx, int apic, int pin) 1207static int pin_2_irq(int idx, int apic, int pin)
1215{ 1208{
1216 int irq, i; 1209 int irq, i;
1217 int bus = mp_irqs[idx].mp_srcbus; 1210 int bus = mp_irqs[idx].srcbus;
1218 1211
1219 /* 1212 /*
1220 * Debugging check, we are in big trouble if this message pops up! 1213 * Debugging check, we are in big trouble if this message pops up!
1221 */ 1214 */
1222 if (mp_irqs[idx].mp_dstirq != pin) 1215 if (mp_irqs[idx].dstirq != pin)
1223 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); 1216 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
1224 1217
1225 if (test_bit(bus, mp_bus_not_pci)) { 1218 if (test_bit(bus, mp_bus_not_pci)) {
1226 irq = mp_irqs[idx].mp_srcbusirq; 1219 irq = mp_irqs[idx].srcbusirq;
1227 } else { 1220 } else {
1228 /* 1221 /*
1229 * PCI IRQs are mapped in order 1222 * PCI IRQs are mapped in order
@@ -1315,7 +1308,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1315 int new_cpu; 1308 int new_cpu;
1316 int vector, offset; 1309 int vector, offset;
1317 1310
1318 vector_allocation_domain(cpu, tmp_mask); 1311 apic->vector_allocation_domain(cpu, tmp_mask);
1319 1312
1320 vector = current_vector; 1313 vector = current_vector;
1321 offset = current_offset; 1314 offset = current_offset;
@@ -1485,10 +1478,10 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t
1485 handle_edge_irq, "edge"); 1478 handle_edge_irq, "edge");
1486} 1479}
1487 1480
1488static int setup_ioapic_entry(int apic, int irq, 1481int setup_ioapic_entry(int apic_id, int irq,
1489 struct IO_APIC_route_entry *entry, 1482 struct IO_APIC_route_entry *entry,
1490 unsigned int destination, int trigger, 1483 unsigned int destination, int trigger,
1491 int polarity, int vector) 1484 int polarity, int vector)
1492{ 1485{
1493 /* 1486 /*
1494 * add it to the IO-APIC irq-routing table: 1487 * add it to the IO-APIC irq-routing table:
@@ -1497,25 +1490,25 @@ static int setup_ioapic_entry(int apic, int irq,
1497 1490
1498#ifdef CONFIG_INTR_REMAP 1491#ifdef CONFIG_INTR_REMAP
1499 if (intr_remapping_enabled) { 1492 if (intr_remapping_enabled) {
1500 struct intel_iommu *iommu = map_ioapic_to_ir(apic); 1493 struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
1501 struct irte irte; 1494 struct irte irte;
1502 struct IR_IO_APIC_route_entry *ir_entry = 1495 struct IR_IO_APIC_route_entry *ir_entry =
1503 (struct IR_IO_APIC_route_entry *) entry; 1496 (struct IR_IO_APIC_route_entry *) entry;
1504 int index; 1497 int index;
1505 1498
1506 if (!iommu) 1499 if (!iommu)
1507 panic("No mapping iommu for ioapic %d\n", apic); 1500 panic("No mapping iommu for ioapic %d\n", apic_id);
1508 1501
1509 index = alloc_irte(iommu, irq, 1); 1502 index = alloc_irte(iommu, irq, 1);
1510 if (index < 0) 1503 if (index < 0)
1511 panic("Failed to allocate IRTE for ioapic %d\n", apic); 1504 panic("Failed to allocate IRTE for ioapic %d\n", apic_id);
1512 1505
1513 memset(&irte, 0, sizeof(irte)); 1506 memset(&irte, 0, sizeof(irte));
1514 1507
1515 irte.present = 1; 1508 irte.present = 1;
1516 irte.dst_mode = INT_DEST_MODE; 1509 irte.dst_mode = apic->irq_dest_mode;
1517 irte.trigger_mode = trigger; 1510 irte.trigger_mode = trigger;
1518 irte.dlvry_mode = INT_DELIVERY_MODE; 1511 irte.dlvry_mode = apic->irq_delivery_mode;
1519 irte.vector = vector; 1512 irte.vector = vector;
1520 irte.dest_id = IRTE_DEST(destination); 1513 irte.dest_id = IRTE_DEST(destination);
1521 1514
@@ -1528,8 +1521,8 @@ static int setup_ioapic_entry(int apic, int irq,
1528 } else 1521 } else
1529#endif 1522#endif
1530 { 1523 {
1531 entry->delivery_mode = INT_DELIVERY_MODE; 1524 entry->delivery_mode = apic->irq_delivery_mode;
1532 entry->dest_mode = INT_DEST_MODE; 1525 entry->dest_mode = apic->irq_dest_mode;
1533 entry->dest = destination; 1526 entry->dest = destination;
1534 } 1527 }
1535 1528
@@ -1546,7 +1539,7 @@ static int setup_ioapic_entry(int apic, int irq,
1546 return 0; 1539 return 0;
1547} 1540}
1548 1541
1549static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc, 1542static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc,
1550 int trigger, int polarity) 1543 int trigger, int polarity)
1551{ 1544{
1552 struct irq_cfg *cfg; 1545 struct irq_cfg *cfg;
@@ -1558,22 +1551,22 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
1558 1551
1559 cfg = desc->chip_data; 1552 cfg = desc->chip_data;
1560 1553
1561 if (assign_irq_vector(irq, cfg, TARGET_CPUS)) 1554 if (assign_irq_vector(irq, cfg, apic->target_cpus()))
1562 return; 1555 return;
1563 1556
1564 dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); 1557 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
1565 1558
1566 apic_printk(APIC_VERBOSE,KERN_DEBUG 1559 apic_printk(APIC_VERBOSE,KERN_DEBUG
1567 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " 1560 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1568 "IRQ %d Mode:%i Active:%i)\n", 1561 "IRQ %d Mode:%i Active:%i)\n",
1569 apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector, 1562 apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
1570 irq, trigger, polarity); 1563 irq, trigger, polarity);
1571 1564
1572 1565
1573 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, 1566 if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
1574 dest, trigger, polarity, cfg->vector)) { 1567 dest, trigger, polarity, cfg->vector)) {
1575 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", 1568 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1576 mp_ioapics[apic].mp_apicid, pin); 1569 mp_ioapics[apic_id].apicid, pin);
1577 __clear_irq_vector(irq, cfg); 1570 __clear_irq_vector(irq, cfg);
1578 return; 1571 return;
1579 } 1572 }
@@ -1582,12 +1575,12 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
1582 if (irq < NR_IRQS_LEGACY) 1575 if (irq < NR_IRQS_LEGACY)
1583 disable_8259A_irq(irq); 1576 disable_8259A_irq(irq);
1584 1577
1585 ioapic_write_entry(apic, pin, entry); 1578 ioapic_write_entry(apic_id, pin, entry);
1586} 1579}
1587 1580
1588static void __init setup_IO_APIC_irqs(void) 1581static void __init setup_IO_APIC_irqs(void)
1589{ 1582{
1590 int apic, pin, idx, irq; 1583 int apic_id, pin, idx, irq;
1591 int notcon = 0; 1584 int notcon = 0;
1592 struct irq_desc *desc; 1585 struct irq_desc *desc;
1593 struct irq_cfg *cfg; 1586 struct irq_cfg *cfg;
@@ -1595,21 +1588,19 @@ static void __init setup_IO_APIC_irqs(void)
1595 1588
1596 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); 1589 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1597 1590
1598 for (apic = 0; apic < nr_ioapics; apic++) { 1591 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
1599 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { 1592 for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
1600 1593
1601 idx = find_irq_entry(apic, pin, mp_INT); 1594 idx = find_irq_entry(apic_id, pin, mp_INT);
1602 if (idx == -1) { 1595 if (idx == -1) {
1603 if (!notcon) { 1596 if (!notcon) {
1604 notcon = 1; 1597 notcon = 1;
1605 apic_printk(APIC_VERBOSE, 1598 apic_printk(APIC_VERBOSE,
1606 KERN_DEBUG " %d-%d", 1599 KERN_DEBUG " %d-%d",
1607 mp_ioapics[apic].mp_apicid, 1600 mp_ioapics[apic_id].apicid, pin);
1608 pin);
1609 } else 1601 } else
1610 apic_printk(APIC_VERBOSE, " %d-%d", 1602 apic_printk(APIC_VERBOSE, " %d-%d",
1611 mp_ioapics[apic].mp_apicid, 1603 mp_ioapics[apic_id].apicid, pin);
1612 pin);
1613 continue; 1604 continue;
1614 } 1605 }
1615 if (notcon) { 1606 if (notcon) {
@@ -1618,20 +1609,25 @@ static void __init setup_IO_APIC_irqs(void)
1618 notcon = 0; 1609 notcon = 0;
1619 } 1610 }
1620 1611
1621 irq = pin_2_irq(idx, apic, pin); 1612 irq = pin_2_irq(idx, apic_id, pin);
1622#ifdef CONFIG_X86_32 1613
1623 if (multi_timer_check(apic, irq)) 1614 /*
1615 * Skip the timer IRQ if there's a quirk handler
1616 * installed and if it returns 1:
1617 */
1618 if (apic->multi_timer_check &&
1619 apic->multi_timer_check(apic_id, irq))
1624 continue; 1620 continue;
1625#endif 1621
1626 desc = irq_to_desc_alloc_cpu(irq, cpu); 1622 desc = irq_to_desc_alloc_cpu(irq, cpu);
1627 if (!desc) { 1623 if (!desc) {
1628 printk(KERN_INFO "can not get irq_desc for %d\n", irq); 1624 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
1629 continue; 1625 continue;
1630 } 1626 }
1631 cfg = desc->chip_data; 1627 cfg = desc->chip_data;
1632 add_pin_to_irq_cpu(cfg, cpu, apic, pin); 1628 add_pin_to_irq_cpu(cfg, cpu, apic_id, pin);
1633 1629
1634 setup_IO_APIC_irq(apic, pin, irq, desc, 1630 setup_IO_APIC_irq(apic_id, pin, irq, desc,
1635 irq_trigger(idx), irq_polarity(idx)); 1631 irq_trigger(idx), irq_polarity(idx));
1636 } 1632 }
1637 } 1633 }
@@ -1644,7 +1640,7 @@ static void __init setup_IO_APIC_irqs(void)
1644/* 1640/*
1645 * Set up the timer pin, possibly with the 8259A-master behind. 1641 * Set up the timer pin, possibly with the 8259A-master behind.
1646 */ 1642 */
1647static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin, 1643static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
1648 int vector) 1644 int vector)
1649{ 1645{
1650 struct IO_APIC_route_entry entry; 1646 struct IO_APIC_route_entry entry;
@@ -1660,10 +1656,10 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1660 * We use logical delivery to get the timer IRQ 1656 * We use logical delivery to get the timer IRQ
1661 * to the first CPU. 1657 * to the first CPU.
1662 */ 1658 */
1663 entry.dest_mode = INT_DEST_MODE; 1659 entry.dest_mode = apic->irq_dest_mode;
1664 entry.mask = 1; /* mask IRQ now */ 1660 entry.mask = 0; /* don't mask IRQ for edge */
1665 entry.dest = cpu_mask_to_apicid(TARGET_CPUS); 1661 entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
1666 entry.delivery_mode = INT_DELIVERY_MODE; 1662 entry.delivery_mode = apic->irq_delivery_mode;
1667 entry.polarity = 0; 1663 entry.polarity = 0;
1668 entry.trigger = 0; 1664 entry.trigger = 0;
1669 entry.vector = vector; 1665 entry.vector = vector;
@@ -1677,7 +1673,7 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1677 /* 1673 /*
1678 * Add it to the IO-APIC irq-routing table: 1674 * Add it to the IO-APIC irq-routing table:
1679 */ 1675 */
1680 ioapic_write_entry(apic, pin, entry); 1676 ioapic_write_entry(apic_id, pin, entry);
1681} 1677}
1682 1678
1683 1679
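The entry built above is the standard two-dword IO-APIC redirection format, now filled from the apic driver's irq_dest_mode/irq_delivery_mode instead of the compile-time INT_* macros, and left unmasked because the timer pin is edge-triggered. As a rough standalone illustration (not part of the patch; bit offsets per the Intel 82093AA datasheet, all values hypothetical) of how those fields pack into what ioapic_write_entry() programs:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* Hypothetical timer setup: logical lowest-priority delivery,
                 * edge trigger, active high, unmasked, destination = CPU 0. */
                uint32_t vector = 0x30, delivery_mode = 1, dest_mode = 1;
                uint32_t polarity = 0, trigger = 0, mask = 0, dest = 0x01;

                /* bits 0-7 vector, 8-10 delivery, 11 dest mode, 13 polarity,
                 * 15 trigger, 16 mask; destination sits in bits 56-63.     */
                uint32_t low  = vector | delivery_mode << 8 | dest_mode << 11 |
                                polarity << 13 | trigger << 15 | mask << 16;
                uint32_t high = dest << 24;

                printf("redirection entry: %08x %08x\n", high, low);
                return 0;
        }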
@@ -1699,7 +1695,7 @@ __apicdebuginit(void) print_IO_APIC(void)
1699 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); 1695 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1700 for (i = 0; i < nr_ioapics; i++) 1696 for (i = 0; i < nr_ioapics; i++)
1701 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", 1697 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1702 mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]); 1698 mp_ioapics[i].apicid, nr_ioapic_registers[i]);
1703 1699
1704 /* 1700 /*
1705 * We are a bit conservative about what we expect. We have to 1701 * We are a bit conservative about what we expect. We have to
@@ -1719,7 +1715,7 @@ __apicdebuginit(void) print_IO_APIC(void)
1719 spin_unlock_irqrestore(&ioapic_lock, flags); 1715 spin_unlock_irqrestore(&ioapic_lock, flags);
1720 1716
1721 printk("\n"); 1717 printk("\n");
1722 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid); 1718 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
1723 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); 1719 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1724 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); 1720 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1725 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); 1721 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
@@ -2090,7 +2086,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
2090{ 2086{
2091 union IO_APIC_reg_00 reg_00; 2087 union IO_APIC_reg_00 reg_00;
2092 physid_mask_t phys_id_present_map; 2088 physid_mask_t phys_id_present_map;
2093 int apic; 2089 int apic_id;
2094 int i; 2090 int i;
2095 unsigned char old_id; 2091 unsigned char old_id;
2096 unsigned long flags; 2092 unsigned long flags;
@@ -2109,26 +2105,26 @@ static void __init setup_ioapic_ids_from_mpc(void)
2109 * This is broken; anything with a real cpu count has to 2105 * This is broken; anything with a real cpu count has to
2110 * circumvent this idiocy regardless. 2106 * circumvent this idiocy regardless.
2111 */ 2107 */
2112 phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map); 2108 phys_id_present_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
2113 2109
2114 /* 2110 /*
2115 * Set the IOAPIC ID to the value stored in the MPC table. 2111 * Set the IOAPIC ID to the value stored in the MPC table.
2116 */ 2112 */
2117 for (apic = 0; apic < nr_ioapics; apic++) { 2113 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
2118 2114
2119 /* Read the register 0 value */ 2115 /* Read the register 0 value */
2120 spin_lock_irqsave(&ioapic_lock, flags); 2116 spin_lock_irqsave(&ioapic_lock, flags);
2121 reg_00.raw = io_apic_read(apic, 0); 2117 reg_00.raw = io_apic_read(apic_id, 0);
2122 spin_unlock_irqrestore(&ioapic_lock, flags); 2118 spin_unlock_irqrestore(&ioapic_lock, flags);
2123 2119
2124 old_id = mp_ioapics[apic].mp_apicid; 2120 old_id = mp_ioapics[apic_id].apicid;
2125 2121
2126 if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) { 2122 if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
2127 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", 2123 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
2128 apic, mp_ioapics[apic].mp_apicid); 2124 apic_id, mp_ioapics[apic_id].apicid);
2129 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", 2125 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2130 reg_00.bits.ID); 2126 reg_00.bits.ID);
2131 mp_ioapics[apic].mp_apicid = reg_00.bits.ID; 2127 mp_ioapics[apic_id].apicid = reg_00.bits.ID;
2132 } 2128 }
2133 2129
2134 /* 2130 /*
@@ -2136,10 +2132,10 @@ static void __init setup_ioapic_ids_from_mpc(void)
2136 * system must have a unique ID or we get lots of nice 2132 * system must have a unique ID or we get lots of nice
2137 * 'stuck on smp_invalidate_needed IPI wait' messages. 2133 * 'stuck on smp_invalidate_needed IPI wait' messages.
2138 */ 2134 */
2139 if (check_apicid_used(phys_id_present_map, 2135 if (apic->check_apicid_used(phys_id_present_map,
2140 mp_ioapics[apic].mp_apicid)) { 2136 mp_ioapics[apic_id].apicid)) {
2141 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", 2137 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
2142 apic, mp_ioapics[apic].mp_apicid); 2138 apic_id, mp_ioapics[apic_id].apicid);
2143 for (i = 0; i < get_physical_broadcast(); i++) 2139 for (i = 0; i < get_physical_broadcast(); i++)
2144 if (!physid_isset(i, phys_id_present_map)) 2140 if (!physid_isset(i, phys_id_present_map))
2145 break; 2141 break;
@@ -2148,13 +2144,13 @@ static void __init setup_ioapic_ids_from_mpc(void)
2148 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", 2144 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2149 i); 2145 i);
2150 physid_set(i, phys_id_present_map); 2146 physid_set(i, phys_id_present_map);
2151 mp_ioapics[apic].mp_apicid = i; 2147 mp_ioapics[apic_id].apicid = i;
2152 } else { 2148 } else {
2153 physid_mask_t tmp; 2149 physid_mask_t tmp;
2154 tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid); 2150 tmp = apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid);
2155 apic_printk(APIC_VERBOSE, "Setting %d in the " 2151 apic_printk(APIC_VERBOSE, "Setting %d in the "
2156 "phys_id_present_map\n", 2152 "phys_id_present_map\n",
2157 mp_ioapics[apic].mp_apicid); 2153 mp_ioapics[apic_id].apicid);
2158 physids_or(phys_id_present_map, phys_id_present_map, tmp); 2154 physids_or(phys_id_present_map, phys_id_present_map, tmp);
2159 } 2155 }
2160 2156
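The duplicate-ID recovery above amounts to scanning the physical-ID map for the first ID not yet claimed, below the broadcast ID. Reduced to a standalone sketch (values made up, get_physical_broadcast() assumed to be 15):

        #include <stdio.h>

        int main(void)
        {
                unsigned long present = 0x0b;   /* IDs 0, 1 and 3 already taken */
                int broadcast = 15, i;

                for (i = 0; i < broadcast; i++)
                        if (!(present & (1UL << i)))
                                break;

                printf("fixed-up IO-APIC ID: %d\n", i);  /* prints 2 */
                return 0;
        }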
@@ -2163,11 +2159,11 @@ static void __init setup_ioapic_ids_from_mpc(void)
2163 * We need to adjust the IRQ routing table 2159 * We need to adjust the IRQ routing table
2164 * if the ID changed. 2160 * if the ID changed.
2165 */ 2161 */
2166 if (old_id != mp_ioapics[apic].mp_apicid) 2162 if (old_id != mp_ioapics[apic_id].apicid)
2167 for (i = 0; i < mp_irq_entries; i++) 2163 for (i = 0; i < mp_irq_entries; i++)
2168 if (mp_irqs[i].mp_dstapic == old_id) 2164 if (mp_irqs[i].dstapic == old_id)
2169 mp_irqs[i].mp_dstapic 2165 mp_irqs[i].dstapic
2170 = mp_ioapics[apic].mp_apicid; 2166 = mp_ioapics[apic_id].apicid;
2171 2167
2172 /* 2168 /*
2173 * Read the right value from the MPC table and 2169 * Read the right value from the MPC table and
@@ -2175,20 +2171,20 @@ static void __init setup_ioapic_ids_from_mpc(void)
2175 */ 2171 */
2176 apic_printk(APIC_VERBOSE, KERN_INFO 2172 apic_printk(APIC_VERBOSE, KERN_INFO
2177 "...changing IO-APIC physical APIC ID to %d ...", 2173 "...changing IO-APIC physical APIC ID to %d ...",
2178 mp_ioapics[apic].mp_apicid); 2174 mp_ioapics[apic_id].apicid);
2179 2175
2180 reg_00.bits.ID = mp_ioapics[apic].mp_apicid; 2176 reg_00.bits.ID = mp_ioapics[apic_id].apicid;
2181 spin_lock_irqsave(&ioapic_lock, flags); 2177 spin_lock_irqsave(&ioapic_lock, flags);
2182 io_apic_write(apic, 0, reg_00.raw); 2178 io_apic_write(apic_id, 0, reg_00.raw);
2183 spin_unlock_irqrestore(&ioapic_lock, flags); 2179 spin_unlock_irqrestore(&ioapic_lock, flags);
2184 2180
2185 /* 2181 /*
2186 * Sanity check 2182 * Sanity check
2187 */ 2183 */
2188 spin_lock_irqsave(&ioapic_lock, flags); 2184 spin_lock_irqsave(&ioapic_lock, flags);
2189 reg_00.raw = io_apic_read(apic, 0); 2185 reg_00.raw = io_apic_read(apic_id, 0);
2190 spin_unlock_irqrestore(&ioapic_lock, flags); 2186 spin_unlock_irqrestore(&ioapic_lock, flags);
2191 if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid) 2187 if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
2192 printk("could not set ID!\n"); 2188 printk("could not set ID!\n");
2193 else 2189 else
2194 apic_printk(APIC_VERBOSE, " ok.\n"); 2190 apic_printk(APIC_VERBOSE, " ok.\n");
@@ -2291,7 +2287,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
2291 unsigned long flags; 2287 unsigned long flags;
2292 2288
2293 spin_lock_irqsave(&vector_lock, flags); 2289 spin_lock_irqsave(&vector_lock, flags);
2294 send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); 2290 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
2295 spin_unlock_irqrestore(&vector_lock, flags); 2291 spin_unlock_irqrestore(&vector_lock, flags);
2296 2292
2297 return 1; 2293 return 1;
@@ -2299,7 +2295,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
2299#else 2295#else
2300static int ioapic_retrigger_irq(unsigned int irq) 2296static int ioapic_retrigger_irq(unsigned int irq)
2301{ 2297{
2302 send_IPI_self(irq_cfg(irq)->vector); 2298 apic->send_IPI_self(irq_cfg(irq)->vector);
2303 2299
2304 return 1; 2300 return 1;
2305} 2301}
@@ -2363,7 +2359,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2363 2359
2364 set_extra_move_desc(desc, mask); 2360 set_extra_move_desc(desc, mask);
2365 2361
2366 dest = cpu_mask_to_apicid_and(cfg->domain, mask); 2362 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
2367 2363
2368 modify_ioapic_rte = desc->status & IRQ_LEVEL; 2364 modify_ioapic_rte = desc->status & IRQ_LEVEL;
2369 if (modify_ioapic_rte) { 2365 if (modify_ioapic_rte) {
@@ -2383,7 +2379,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2383 if (cfg->move_in_progress) 2379 if (cfg->move_in_progress)
2384 send_cleanup_vector(cfg); 2380 send_cleanup_vector(cfg);
2385 2381
2386 cpumask_copy(&desc->affinity, mask); 2382 cpumask_copy(desc->affinity, mask);
2387} 2383}
2388 2384
2389static int migrate_irq_remapped_level_desc(struct irq_desc *desc) 2385static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2405,11 +2401,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
2405 } 2401 }
2406 2402
 2407 /* everything is clear. we have right of way */ 2403 /* everything is clear. we have right of way */
2408 migrate_ioapic_irq_desc(desc, &desc->pending_mask); 2404 migrate_ioapic_irq_desc(desc, desc->pending_mask);
2409 2405
2410 ret = 0; 2406 ret = 0;
2411 desc->status &= ~IRQ_MOVE_PENDING; 2407 desc->status &= ~IRQ_MOVE_PENDING;
2412 cpumask_clear(&desc->pending_mask); 2408 cpumask_clear(desc->pending_mask);
2413 2409
2414unmask: 2410unmask:
2415 unmask_IO_APIC_irq_desc(desc); 2411 unmask_IO_APIC_irq_desc(desc);
@@ -2434,7 +2430,7 @@ static void ir_irq_migration(struct work_struct *work)
2434 continue; 2430 continue;
2435 } 2431 }
2436 2432
2437 desc->chip->set_affinity(irq, &desc->pending_mask); 2433 desc->chip->set_affinity(irq, desc->pending_mask);
2438 spin_unlock_irqrestore(&desc->lock, flags); 2434 spin_unlock_irqrestore(&desc->lock, flags);
2439 } 2435 }
2440 } 2436 }
@@ -2448,7 +2444,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
2448{ 2444{
2449 if (desc->status & IRQ_LEVEL) { 2445 if (desc->status & IRQ_LEVEL) {
2450 desc->status |= IRQ_MOVE_PENDING; 2446 desc->status |= IRQ_MOVE_PENDING;
2451 cpumask_copy(&desc->pending_mask, mask); 2447 cpumask_copy(desc->pending_mask, mask);
2452 migrate_irq_remapped_level_desc(desc); 2448 migrate_irq_remapped_level_desc(desc);
2453 return; 2449 return;
2454 } 2450 }
@@ -2516,7 +2512,7 @@ static void irq_complete_move(struct irq_desc **descp)
2516 2512
2517 /* domain has not changed, but affinity did */ 2513 /* domain has not changed, but affinity did */
2518 me = smp_processor_id(); 2514 me = smp_processor_id();
2519 if (cpu_isset(me, desc->affinity)) { 2515 if (cpumask_test_cpu(me, desc->affinity)) {
2520 *descp = desc = move_irq_desc(desc, me); 2516 *descp = desc = move_irq_desc(desc, me);
2521 /* get the new one */ 2517 /* get the new one */
2522 cfg = desc->chip_data; 2518 cfg = desc->chip_data;
@@ -2867,19 +2863,15 @@ static inline void __init check_timer(void)
2867 int cpu = boot_cpu_id; 2863 int cpu = boot_cpu_id;
2868 int apic1, pin1, apic2, pin2; 2864 int apic1, pin1, apic2, pin2;
2869 unsigned long flags; 2865 unsigned long flags;
2870 unsigned int ver;
2871 int no_pin1 = 0; 2866 int no_pin1 = 0;
2872 2867
2873 local_irq_save(flags); 2868 local_irq_save(flags);
2874 2869
2875 ver = apic_read(APIC_LVR);
2876 ver = GET_APIC_VERSION(ver);
2877
2878 /* 2870 /*
2879 * get/set the timer IRQ vector: 2871 * get/set the timer IRQ vector:
2880 */ 2872 */
2881 disable_8259A_irq(0); 2873 disable_8259A_irq(0);
2882 assign_irq_vector(0, cfg, TARGET_CPUS); 2874 assign_irq_vector(0, cfg, apic->target_cpus());
2883 2875
2884 /* 2876 /*
2885 * As IRQ0 is to be enabled in the 8259A, the virtual 2877 * As IRQ0 is to be enabled in the 8259A, the virtual
@@ -2893,7 +2885,13 @@ static inline void __init check_timer(void)
2893 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); 2885 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2894 init_8259A(1); 2886 init_8259A(1);
2895#ifdef CONFIG_X86_32 2887#ifdef CONFIG_X86_32
2896 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); 2888 {
2889 unsigned int ver;
2890
2891 ver = apic_read(APIC_LVR);
2892 ver = GET_APIC_VERSION(ver);
2893 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
2894 }
2897#endif 2895#endif
2898 2896
2899 pin1 = find_isa_irq_pin(0, mp_INT); 2897 pin1 = find_isa_irq_pin(0, mp_INT);
@@ -2932,8 +2930,17 @@ static inline void __init check_timer(void)
2932 if (no_pin1) { 2930 if (no_pin1) {
2933 add_pin_to_irq_cpu(cfg, cpu, apic1, pin1); 2931 add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
2934 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2932 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2933 } else {
 2934 /* for edge trigger, setup_IO_APIC_irq already
 2935 * leaves it unmasked,
 2936 * so we only need to unmask it if it is level-triggered.
 2937 * (do we really have a level-triggered timer?)
2938 */
2939 int idx;
2940 idx = find_irq_entry(apic1, pin1, mp_INT);
2941 if (idx != -1 && irq_trigger(idx))
2942 unmask_IO_APIC_irq_desc(desc);
2935 } 2943 }
2936 unmask_IO_APIC_irq_desc(desc);
2937 if (timer_irq_works()) { 2944 if (timer_irq_works()) {
2938 if (nmi_watchdog == NMI_IO_APIC) { 2945 if (nmi_watchdog == NMI_IO_APIC) {
2939 setup_nmi(); 2946 setup_nmi();
@@ -2947,6 +2954,7 @@ static inline void __init check_timer(void)
2947 if (intr_remapping_enabled) 2954 if (intr_remapping_enabled)
2948 panic("timer doesn't work through Interrupt-remapped IO-APIC"); 2955 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2949#endif 2956#endif
2957 local_irq_disable();
2950 clear_IO_APIC_pin(apic1, pin1); 2958 clear_IO_APIC_pin(apic1, pin1);
2951 if (!no_pin1) 2959 if (!no_pin1)
2952 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " 2960 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
@@ -2961,7 +2969,6 @@ static inline void __init check_timer(void)
2961 */ 2969 */
2962 replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2); 2970 replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
2963 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 2971 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2964 unmask_IO_APIC_irq_desc(desc);
2965 enable_8259A_irq(0); 2972 enable_8259A_irq(0);
2966 if (timer_irq_works()) { 2973 if (timer_irq_works()) {
2967 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 2974 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
@@ -2976,6 +2983,7 @@ static inline void __init check_timer(void)
2976 /* 2983 /*
2977 * Cleanup, just in case ... 2984 * Cleanup, just in case ...
2978 */ 2985 */
2986 local_irq_disable();
2979 disable_8259A_irq(0); 2987 disable_8259A_irq(0);
2980 clear_IO_APIC_pin(apic2, pin2); 2988 clear_IO_APIC_pin(apic2, pin2);
2981 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); 2989 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
@@ -3001,6 +3009,7 @@ static inline void __init check_timer(void)
3001 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 3009 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3002 goto out; 3010 goto out;
3003 } 3011 }
3012 local_irq_disable();
3004 disable_8259A_irq(0); 3013 disable_8259A_irq(0);
3005 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 3014 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
3006 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 3015 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
@@ -3018,6 +3027,7 @@ static inline void __init check_timer(void)
3018 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 3027 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3019 goto out; 3028 goto out;
3020 } 3029 }
3030 local_irq_disable();
3021 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); 3031 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
3022 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " 3032 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
3023 "report. Then try booting with the 'noapic' option.\n"); 3033 "report. Then try booting with the 'noapic' option.\n");
@@ -3118,8 +3128,8 @@ static int ioapic_resume(struct sys_device *dev)
3118 3128
3119 spin_lock_irqsave(&ioapic_lock, flags); 3129 spin_lock_irqsave(&ioapic_lock, flags);
3120 reg_00.raw = io_apic_read(dev->id, 0); 3130 reg_00.raw = io_apic_read(dev->id, 0);
3121 if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) { 3131 if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
3122 reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid; 3132 reg_00.bits.ID = mp_ioapics[dev->id].apicid;
3123 io_apic_write(dev->id, 0, reg_00.raw); 3133 io_apic_write(dev->id, 0, reg_00.raw);
3124 } 3134 }
3125 spin_unlock_irqrestore(&ioapic_lock, flags); 3135 spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -3169,6 +3179,7 @@ static int __init ioapic_init_sysfs(void)
3169 3179
3170device_initcall(ioapic_init_sysfs); 3180device_initcall(ioapic_init_sysfs);
3171 3181
3182static int nr_irqs_gsi = NR_IRQS_LEGACY;
3172/* 3183/*
3173 * Dynamic irq allocate and deallocation 3184 * Dynamic irq allocate and deallocation
3174 */ 3185 */
@@ -3183,11 +3194,11 @@ unsigned int create_irq_nr(unsigned int irq_want)
3183 struct irq_desc *desc_new = NULL; 3194 struct irq_desc *desc_new = NULL;
3184 3195
3185 irq = 0; 3196 irq = 0;
3186 spin_lock_irqsave(&vector_lock, flags); 3197 if (irq_want < nr_irqs_gsi)
3187 for (new = irq_want; new < NR_IRQS; new++) { 3198 irq_want = nr_irqs_gsi;
3188 if (platform_legacy_irq(new))
3189 continue;
3190 3199
3200 spin_lock_irqsave(&vector_lock, flags);
3201 for (new = irq_want; new < nr_irqs; new++) {
3191 desc_new = irq_to_desc_alloc_cpu(new, cpu); 3202 desc_new = irq_to_desc_alloc_cpu(new, cpu);
3192 if (!desc_new) { 3203 if (!desc_new) {
3193 printk(KERN_INFO "can not get irq_desc for %d\n", new); 3204 printk(KERN_INFO "can not get irq_desc for %d\n", new);
@@ -3197,7 +3208,7 @@ unsigned int create_irq_nr(unsigned int irq_want)
3197 3208
3198 if (cfg_new->vector != 0) 3209 if (cfg_new->vector != 0)
3199 continue; 3210 continue;
3200 if (__assign_irq_vector(new, cfg_new, TARGET_CPUS) == 0) 3211 if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
3201 irq = new; 3212 irq = new;
3202 break; 3213 break;
3203 } 3214 }
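A sketch of what the reordered allocation buys a caller (illustrative fragment, not from the patch): any hint below the GSI range is silently rounded up to nr_irqs_gsi, so dynamically created IRQ numbers can no longer collide with ACPI GSIs:

        /* hypothetical caller */
        unsigned int irq = create_irq_nr(0);    /* hint < nr_irqs_gsi is rounded up    */
        if (irq == 0)
                return -ENOSPC;                 /* no free descriptor/vector was found */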
@@ -3212,7 +3223,6 @@ unsigned int create_irq_nr(unsigned int irq_want)
3212 return irq; 3223 return irq;
3213} 3224}
3214 3225
3215static int nr_irqs_gsi = NR_IRQS_LEGACY;
3216int create_irq(void) 3226int create_irq(void)
3217{ 3227{
3218 unsigned int irq_want; 3228 unsigned int irq_want;
@@ -3259,12 +3269,15 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3259 int err; 3269 int err;
3260 unsigned dest; 3270 unsigned dest;
3261 3271
3272 if (disable_apic)
3273 return -ENXIO;
3274
3262 cfg = irq_cfg(irq); 3275 cfg = irq_cfg(irq);
3263 err = assign_irq_vector(irq, cfg, TARGET_CPUS); 3276 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3264 if (err) 3277 if (err)
3265 return err; 3278 return err;
3266 3279
3267 dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); 3280 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
3268 3281
3269#ifdef CONFIG_INTR_REMAP 3282#ifdef CONFIG_INTR_REMAP
3270 if (irq_remapped(irq)) { 3283 if (irq_remapped(irq)) {
@@ -3278,9 +3291,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3278 memset (&irte, 0, sizeof(irte)); 3291 memset (&irte, 0, sizeof(irte));
3279 3292
3280 irte.present = 1; 3293 irte.present = 1;
3281 irte.dst_mode = INT_DEST_MODE; 3294 irte.dst_mode = apic->irq_dest_mode;
3282 irte.trigger_mode = 0; /* edge */ 3295 irte.trigger_mode = 0; /* edge */
3283 irte.dlvry_mode = INT_DELIVERY_MODE; 3296 irte.dlvry_mode = apic->irq_delivery_mode;
3284 irte.vector = cfg->vector; 3297 irte.vector = cfg->vector;
3285 irte.dest_id = IRTE_DEST(dest); 3298 irte.dest_id = IRTE_DEST(dest);
3286 3299
@@ -3298,10 +3311,10 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3298 msg->address_hi = MSI_ADDR_BASE_HI; 3311 msg->address_hi = MSI_ADDR_BASE_HI;
3299 msg->address_lo = 3312 msg->address_lo =
3300 MSI_ADDR_BASE_LO | 3313 MSI_ADDR_BASE_LO |
3301 ((INT_DEST_MODE == 0) ? 3314 ((apic->irq_dest_mode == 0) ?
3302 MSI_ADDR_DEST_MODE_PHYSICAL: 3315 MSI_ADDR_DEST_MODE_PHYSICAL:
3303 MSI_ADDR_DEST_MODE_LOGICAL) | 3316 MSI_ADDR_DEST_MODE_LOGICAL) |
3304 ((INT_DELIVERY_MODE != dest_LowestPrio) ? 3317 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3305 MSI_ADDR_REDIRECTION_CPU: 3318 MSI_ADDR_REDIRECTION_CPU:
3306 MSI_ADDR_REDIRECTION_LOWPRI) | 3319 MSI_ADDR_REDIRECTION_LOWPRI) |
3307 MSI_ADDR_DEST_ID(dest); 3320 MSI_ADDR_DEST_ID(dest);
@@ -3309,7 +3322,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3309 msg->data = 3322 msg->data =
3310 MSI_DATA_TRIGGER_EDGE | 3323 MSI_DATA_TRIGGER_EDGE |
3311 MSI_DATA_LEVEL_ASSERT | 3324 MSI_DATA_LEVEL_ASSERT |
3312 ((INT_DELIVERY_MODE != dest_LowestPrio) ? 3325 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3313 MSI_DATA_DELIVERY_FIXED: 3326 MSI_DATA_DELIVERY_FIXED:
3314 MSI_DATA_DELIVERY_LOWPRI) | 3327 MSI_DATA_DELIVERY_LOWPRI) |
3315 MSI_DATA_VECTOR(cfg->vector); 3328 MSI_DATA_VECTOR(cfg->vector);
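To make the composed message concrete, here is the same arithmetic done standalone (illustrative only: the constants follow the x86 MSI address/data layout that the MSI_* macros encode, and the APIC ID and vector are made up):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t dest = 3, vector = 0x41;       /* hypothetical APIC ID, vector */
                int logical = 0, lowest_prio = 0;       /* physical, fixed delivery     */

                uint32_t address_lo = 0xfee00000                /* MSI_ADDR_BASE_LO     */
                                | (logical ? 1 << 2 : 0)        /* destination mode     */
                                | (lowest_prio ? 1 << 3 : 0)    /* redirection hint     */
                                | (dest << 12);                 /* dest ID, bits 12-19  */
                uint32_t data = (0 << 15)                       /* edge trigger         */
                                | (1 << 14)                     /* level assert         */
                                | (lowest_prio ? 1 << 8 : 0)    /* delivery mode        */
                                | vector;

                printf("address_lo=%08x data=%04x\n", address_lo, data); /* fee03000 4041 */
                return 0;
        }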
@@ -3464,40 +3477,6 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3464 return 0; 3477 return 0;
3465} 3478}
3466 3479
3467int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
3468{
3469 unsigned int irq;
3470 int ret;
3471 unsigned int irq_want;
3472
3473 irq_want = nr_irqs_gsi;
3474 irq = create_irq_nr(irq_want);
3475 if (irq == 0)
3476 return -1;
3477
3478#ifdef CONFIG_INTR_REMAP
3479 if (!intr_remapping_enabled)
3480 goto no_ir;
3481
3482 ret = msi_alloc_irte(dev, irq, 1);
3483 if (ret < 0)
3484 goto error;
3485no_ir:
3486#endif
3487 ret = setup_msi_irq(dev, msidesc, irq);
3488 if (ret < 0) {
3489 destroy_irq(irq);
3490 return ret;
3491 }
3492 return 0;
3493
3494#ifdef CONFIG_INTR_REMAP
3495error:
3496 destroy_irq(irq);
3497 return ret;
3498#endif
3499}
3500
3501int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3480int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3502{ 3481{
3503 unsigned int irq; 3482 unsigned int irq;
@@ -3514,9 +3493,9 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3514 sub_handle = 0; 3493 sub_handle = 0;
3515 list_for_each_entry(msidesc, &dev->msi_list, list) { 3494 list_for_each_entry(msidesc, &dev->msi_list, list) {
3516 irq = create_irq_nr(irq_want); 3495 irq = create_irq_nr(irq_want);
3517 irq_want++;
3518 if (irq == 0) 3496 if (irq == 0)
3519 return -1; 3497 return -1;
3498 irq_want = irq + 1;
3520#ifdef CONFIG_INTR_REMAP 3499#ifdef CONFIG_INTR_REMAP
3521 if (!intr_remapping_enabled) 3500 if (!intr_remapping_enabled)
3522 goto no_ir; 3501 goto no_ir;
@@ -3727,13 +3706,17 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3727 struct irq_cfg *cfg; 3706 struct irq_cfg *cfg;
3728 int err; 3707 int err;
3729 3708
3709 if (disable_apic)
3710 return -ENXIO;
3711
3730 cfg = irq_cfg(irq); 3712 cfg = irq_cfg(irq);
3731 err = assign_irq_vector(irq, cfg, TARGET_CPUS); 3713 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3732 if (!err) { 3714 if (!err) {
3733 struct ht_irq_msg msg; 3715 struct ht_irq_msg msg;
3734 unsigned dest; 3716 unsigned dest;
3735 3717
3736 dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); 3718 dest = apic->cpu_mask_to_apicid_and(cfg->domain,
3719 apic->target_cpus());
3737 3720
3738 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); 3721 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3739 3722
@@ -3741,11 +3724,11 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3741 HT_IRQ_LOW_BASE | 3724 HT_IRQ_LOW_BASE |
3742 HT_IRQ_LOW_DEST_ID(dest) | 3725 HT_IRQ_LOW_DEST_ID(dest) |
3743 HT_IRQ_LOW_VECTOR(cfg->vector) | 3726 HT_IRQ_LOW_VECTOR(cfg->vector) |
3744 ((INT_DEST_MODE == 0) ? 3727 ((apic->irq_dest_mode == 0) ?
3745 HT_IRQ_LOW_DM_PHYSICAL : 3728 HT_IRQ_LOW_DM_PHYSICAL :
3746 HT_IRQ_LOW_DM_LOGICAL) | 3729 HT_IRQ_LOW_DM_LOGICAL) |
3747 HT_IRQ_LOW_RQEOI_EDGE | 3730 HT_IRQ_LOW_RQEOI_EDGE |
3748 ((INT_DELIVERY_MODE != dest_LowestPrio) ? 3731 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3749 HT_IRQ_LOW_MT_FIXED : 3732 HT_IRQ_LOW_MT_FIXED :
3750 HT_IRQ_LOW_MT_ARBITRATED) | 3733 HT_IRQ_LOW_MT_ARBITRATED) |
3751 HT_IRQ_LOW_IRQ_MASKED; 3734 HT_IRQ_LOW_IRQ_MASKED;
@@ -3761,7 +3744,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3761} 3744}
3762#endif /* CONFIG_HT_IRQ */ 3745#endif /* CONFIG_HT_IRQ */
3763 3746
3764#ifdef CONFIG_X86_64 3747#ifdef CONFIG_X86_UV
3765/* 3748/*
3766 * Re-target the irq to the specified CPU and enable the specified MMR located 3749 * Re-target the irq to the specified CPU and enable the specified MMR located
3767 * on the specified blade to allow the sending of MSIs to the specified CPU. 3750 * on the specified blade to allow the sending of MSIs to the specified CPU.
@@ -3793,12 +3776,12 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3793 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); 3776 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
3794 3777
3795 entry->vector = cfg->vector; 3778 entry->vector = cfg->vector;
3796 entry->delivery_mode = INT_DELIVERY_MODE; 3779 entry->delivery_mode = apic->irq_delivery_mode;
3797 entry->dest_mode = INT_DEST_MODE; 3780 entry->dest_mode = apic->irq_dest_mode;
3798 entry->polarity = 0; 3781 entry->polarity = 0;
3799 entry->trigger = 0; 3782 entry->trigger = 0;
3800 entry->mask = 0; 3783 entry->mask = 0;
3801 entry->dest = cpu_mask_to_apicid(eligible_cpu); 3784 entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);
3802 3785
3803 mmr_pnode = uv_blade_to_pnode(mmr_blade); 3786 mmr_pnode = uv_blade_to_pnode(mmr_blade);
3804 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); 3787 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -3861,6 +3844,28 @@ void __init probe_nr_irqs_gsi(void)
3861 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); 3844 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
3862} 3845}
3863 3846
3847#ifdef CONFIG_SPARSE_IRQ
3848int __init arch_probe_nr_irqs(void)
3849{
3850 int nr;
3851
3852 if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
3853 nr_irqs = NR_VECTORS * nr_cpu_ids;
3854
3855 nr = nr_irqs_gsi + 8 * nr_cpu_ids;
3856#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
3857 /*
3858 * for MSI and HT dyn irq
3859 */
3860 nr += nr_irqs_gsi * 16;
3861#endif
3862 if (nr < nr_irqs)
3863 nr_irqs = nr;
3864
3865 return 0;
3866}
3867#endif
3868
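Worked through with made-up numbers, the heuristic in the new arch_probe_nr_irqs() sizes the sparse-irq space roughly as follows (standalone illustration only):

        #include <stdio.h>

        int main(void)
        {
                /* hypothetical machine: 8 possible CPUs, 48 GSIs, PCI_MSI enabled */
                int nr_cpu_ids = 8, nr_irqs_gsi = 48, nr_vectors = 256;
                int nr_irqs = 32768;                    /* assumed generic default  */
                int nr;

                if (nr_irqs > nr_vectors * nr_cpu_ids)
                        nr_irqs = nr_vectors * nr_cpu_ids;      /* capped at 2048   */

                nr = nr_irqs_gsi + 8 * nr_cpu_ids;      /* 48 + 64  = 112           */
                nr += nr_irqs_gsi * 16;                 /* MSI/HT headroom: + 768   */

                if (nr < nr_irqs)
                        nr_irqs = nr;                   /* final value: 880         */

                printf("nr_irqs = %d\n", nr_irqs);
                return 0;
        }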
3864/* -------------------------------------------------------------------------- 3869/* --------------------------------------------------------------------------
3865 ACPI-based IOAPIC Configuration 3870 ACPI-based IOAPIC Configuration
3866 -------------------------------------------------------------------------- */ 3871 -------------------------------------------------------------------------- */
@@ -3886,7 +3891,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
3886 */ 3891 */
3887 3892
3888 if (physids_empty(apic_id_map)) 3893 if (physids_empty(apic_id_map))
3889 apic_id_map = ioapic_phys_id_map(phys_cpu_present_map); 3894 apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
3890 3895
3891 spin_lock_irqsave(&ioapic_lock, flags); 3896 spin_lock_irqsave(&ioapic_lock, flags);
3892 reg_00.raw = io_apic_read(ioapic, 0); 3897 reg_00.raw = io_apic_read(ioapic, 0);
@@ -3902,10 +3907,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
3902 * Every APIC in a system must have a unique ID or we get lots of nice 3907 * Every APIC in a system must have a unique ID or we get lots of nice
3903 * 'stuck on smp_invalidate_needed IPI wait' messages. 3908 * 'stuck on smp_invalidate_needed IPI wait' messages.
3904 */ 3909 */
3905 if (check_apicid_used(apic_id_map, apic_id)) { 3910 if (apic->check_apicid_used(apic_id_map, apic_id)) {
3906 3911
3907 for (i = 0; i < get_physical_broadcast(); i++) { 3912 for (i = 0; i < get_physical_broadcast(); i++) {
3908 if (!check_apicid_used(apic_id_map, i)) 3913 if (!apic->check_apicid_used(apic_id_map, i))
3909 break; 3914 break;
3910 } 3915 }
3911 3916
@@ -3918,7 +3923,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
3918 apic_id = i; 3923 apic_id = i;
3919 } 3924 }
3920 3925
3921 tmp = apicid_to_cpu_present(apic_id); 3926 tmp = apic->apicid_to_cpu_present(apic_id);
3922 physids_or(apic_id_map, apic_id_map, tmp); 3927 physids_or(apic_id_map, apic_id_map, tmp);
3923 3928
3924 if (reg_00.bits.ID != apic_id) { 3929 if (reg_00.bits.ID != apic_id) {
@@ -3995,8 +4000,8 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
3995 return -1; 4000 return -1;
3996 4001
3997 for (i = 0; i < mp_irq_entries; i++) 4002 for (i = 0; i < mp_irq_entries; i++)
3998 if (mp_irqs[i].mp_irqtype == mp_INT && 4003 if (mp_irqs[i].irqtype == mp_INT &&
3999 mp_irqs[i].mp_srcbusirq == bus_irq) 4004 mp_irqs[i].srcbusirq == bus_irq)
4000 break; 4005 break;
4001 if (i >= mp_irq_entries) 4006 if (i >= mp_irq_entries)
4002 return -1; 4007 return -1;
@@ -4011,7 +4016,7 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
4011/* 4016/*
4012 * This function currently is only a helper for the i386 smp boot process where 4017 * This function currently is only a helper for the i386 smp boot process where
4013 * we need to reprogram the ioredtbls to cater for the cpus which have come online 4018 * we need to reprogram the ioredtbls to cater for the cpus which have come online
4014 * so mask in all cases should simply be TARGET_CPUS 4019 * so mask in all cases should simply be apic->target_cpus()
4015 */ 4020 */
4016#ifdef CONFIG_SMP 4021#ifdef CONFIG_SMP
4017void __init setup_ioapic_dest(void) 4022void __init setup_ioapic_dest(void)
@@ -4050,9 +4055,9 @@ void __init setup_ioapic_dest(void)
4050 */ 4055 */
4051 if (desc->status & 4056 if (desc->status &
4052 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) 4057 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
4053 mask = &desc->affinity; 4058 mask = desc->affinity;
4054 else 4059 else
4055 mask = TARGET_CPUS; 4060 mask = apic->target_cpus();
4056 4061
4057#ifdef CONFIG_INTR_REMAP 4062#ifdef CONFIG_INTR_REMAP
4058 if (intr_remapping_enabled) 4063 if (intr_remapping_enabled)
@@ -4111,7 +4116,7 @@ void __init ioapic_init_mappings(void)
4111 ioapic_res = ioapic_setup_resources(); 4116 ioapic_res = ioapic_setup_resources();
4112 for (i = 0; i < nr_ioapics; i++) { 4117 for (i = 0; i < nr_ioapics; i++) {
4113 if (smp_found_config) { 4118 if (smp_found_config) {
4114 ioapic_phys = mp_ioapics[i].mp_apicaddr; 4119 ioapic_phys = mp_ioapics[i].apicaddr;
4115#ifdef CONFIG_X86_32 4120#ifdef CONFIG_X86_32
4116 if (!ioapic_phys) { 4121 if (!ioapic_phys) {
4117 printk(KERN_ERR 4122 printk(KERN_ERR
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
index 285bbf8831fa..dbf5445727a9 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/ipi.c
@@ -17,147 +17,121 @@
17#include <asm/mmu_context.h> 17#include <asm/mmu_context.h>
18#include <asm/apic.h> 18#include <asm/apic.h>
19#include <asm/proto.h> 19#include <asm/proto.h>
20#include <asm/ipi.h>
20 21
21#ifdef CONFIG_X86_32 22void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
22#include <mach_apic.h>
23#include <mach_ipi.h>
24
25/*
26 * the following functions deal with sending IPIs between CPUs.
27 *
28 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
29 */
30
31static inline int __prepare_ICR(unsigned int shortcut, int vector)
32{ 23{
33 unsigned int icr = shortcut | APIC_DEST_LOGICAL; 24 unsigned long query_cpu;
34 25 unsigned long flags;
35 switch (vector) { 26
36 default: 27 /*
37 icr |= APIC_DM_FIXED | vector; 28 * Hack. The clustered APIC addressing mode doesn't allow us to send
38 break; 29 * to an arbitrary mask, so I do a unicast to each CPU instead.
39 case NMI_VECTOR: 30 * - mbligh
40 icr |= APIC_DM_NMI; 31 */
41 break; 32 local_irq_save(flags);
33 for_each_cpu(query_cpu, mask) {
34 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
35 query_cpu), vector, APIC_DEST_PHYSICAL);
42 } 36 }
43 return icr; 37 local_irq_restore(flags);
44} 38}
45 39
46static inline int __prepare_ICR2(unsigned int mask) 40void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
41 int vector)
47{ 42{
48 return SET_APIC_DEST_FIELD(mask); 43 unsigned int this_cpu = smp_processor_id();
49} 44 unsigned int query_cpu;
45 unsigned long flags;
50 46
51void __send_IPI_shortcut(unsigned int shortcut, int vector) 47 /* See Hack comment above */
52{
53 /*
54 * Subtle. In the case of the 'never do double writes' workaround
55 * we have to lock out interrupts to be safe. As we don't care
56 * of the value read we use an atomic rmw access to avoid costly
57 * cli/sti. Otherwise we use an even cheaper single atomic write
58 * to the APIC.
59 */
60 unsigned int cfg;
61 48
62 /* 49 local_irq_save(flags);
63 * Wait for idle. 50 for_each_cpu(query_cpu, mask) {
64 */ 51 if (query_cpu == this_cpu)
65 apic_wait_icr_idle(); 52 continue;
53 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
54 query_cpu), vector, APIC_DEST_PHYSICAL);
55 }
56 local_irq_restore(flags);
57}
66 58
67 /* 59void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
68 * No need to touch the target chip field 60 int vector)
69 */ 61{
70 cfg = __prepare_ICR(shortcut, vector); 62 unsigned long flags;
63 unsigned int query_cpu;
71 64
72 /* 65 /*
73 * Send the IPI. The write to APIC_ICR fires this off. 66 * Hack. The clustered APIC addressing mode doesn't allow us to send
 67 * to an arbitrary mask, so I do a unicast to each CPU instead. This
68 * should be modified to do 1 message per cluster ID - mbligh
74 */ 69 */
75 apic_write(APIC_ICR, cfg);
76}
77 70
78void send_IPI_self(int vector) 71 local_irq_save(flags);
79{ 72 for_each_cpu(query_cpu, mask)
80 __send_IPI_shortcut(APIC_DEST_SELF, vector); 73 __default_send_IPI_dest_field(
74 apic->cpu_to_logical_apicid(query_cpu), vector,
75 apic->dest_logical);
76 local_irq_restore(flags);
81} 77}
82 78
83/* 79void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
84 * This is used to send an IPI with no shorthand notation (the destination is 80 int vector)
85 * specified in bits 56 to 63 of the ICR).
86 */
87static inline void __send_IPI_dest_field(unsigned long mask, int vector)
88{ 81{
89 unsigned long cfg; 82 unsigned long flags;
90 83 unsigned int query_cpu;
91 /* 84 unsigned int this_cpu = smp_processor_id();
92 * Wait for idle.
93 */
94 if (unlikely(vector == NMI_VECTOR))
95 safe_apic_wait_icr_idle();
96 else
97 apic_wait_icr_idle();
98
99 /*
100 * prepare target chip field
101 */
102 cfg = __prepare_ICR2(mask);
103 apic_write(APIC_ICR2, cfg);
104 85
105 /* 86 /* See Hack comment above */
106 * program the ICR
107 */
108 cfg = __prepare_ICR(0, vector);
109 87
110 /* 88 local_irq_save(flags);
111 * Send the IPI. The write to APIC_ICR fires this off. 89 for_each_cpu(query_cpu, mask) {
112 */ 90 if (query_cpu == this_cpu)
113 apic_write(APIC_ICR, cfg); 91 continue;
92 __default_send_IPI_dest_field(
93 apic->cpu_to_logical_apicid(query_cpu), vector,
94 apic->dest_logical);
95 }
96 local_irq_restore(flags);
114} 97}
115 98
99#ifdef CONFIG_X86_32
100
116/* 101/*
117 * This is only used on smaller machines. 102 * This is only used on smaller machines.
118 */ 103 */
119void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector) 104void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
120{ 105{
121 unsigned long mask = cpumask_bits(cpumask)[0]; 106 unsigned long mask = cpumask_bits(cpumask)[0];
122 unsigned long flags; 107 unsigned long flags;
123 108
124 local_irq_save(flags); 109 local_irq_save(flags);
125 WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); 110 WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
126 __send_IPI_dest_field(mask, vector); 111 __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
127 local_irq_restore(flags); 112 local_irq_restore(flags);
128} 113}
129 114
130void send_IPI_mask_sequence(const struct cpumask *mask, int vector) 115void default_send_IPI_allbutself(int vector)
131{ 116{
132 unsigned long flags;
133 unsigned int query_cpu;
134
135 /* 117 /*
136 * Hack. The clustered APIC addressing mode doesn't allow us to send 118 * if there are no other CPUs in the system then we get an APIC send
137 * to an arbitrary mask, so I do a unicasts to each CPU instead. This 119 * error if we try to broadcast, thus avoid sending IPIs in this case.
138 * should be modified to do 1 message per cluster ID - mbligh
139 */ 120 */
121 if (!(num_online_cpus() > 1))
122 return;
140 123
141 local_irq_save(flags); 124 __default_local_send_IPI_allbutself(vector);
142 for_each_cpu(query_cpu, mask)
143 __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
144 local_irq_restore(flags);
145} 125}
146 126
147void send_IPI_mask_allbutself(const struct cpumask *mask, int vector) 127void default_send_IPI_all(int vector)
148{ 128{
149 unsigned long flags; 129 __default_local_send_IPI_all(vector);
150 unsigned int query_cpu; 130}
151 unsigned int this_cpu = smp_processor_id();
152
153 /* See Hack comment above */
154 131
155 local_irq_save(flags); 132void default_send_IPI_self(int vector)
156 for_each_cpu(query_cpu, mask) 133{
157 if (query_cpu != this_cpu) 134 __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
158 __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
159 vector);
160 local_irq_restore(flags);
161} 135}
162 136
163/* must come after the send_IPI functions above for inlining */ 137/* must come after the send_IPI functions above for inlining */
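None of these default_send_IPI_*() helpers are called by name from generic code; the point of the rename is that each sub-architecture's apic driver now points its function pointers at them. A hypothetical initializer, only to show the wiring direction (the struct name and field set are assumptions based on the asm/genapic.h include; the real drivers are filled in elsewhere in this series):

        static struct genapic apic_flat_like = {
                /* ... identification, probing and addressing callbacks ... */
                .send_IPI_mask          = default_send_IPI_mask_logical,
                .send_IPI_allbutself    = default_send_IPI_allbutself,
                .send_IPI_all           = default_send_IPI_all,
                .send_IPI_self          = default_send_IPI_self,
        };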
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 3973e2df7f87..f13ca1650aaf 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -6,10 +6,12 @@
6#include <linux/kernel_stat.h> 6#include <linux/kernel_stat.h>
7#include <linux/seq_file.h> 7#include <linux/seq_file.h>
8#include <linux/smp.h> 8#include <linux/smp.h>
9#include <linux/ftrace.h>
9 10
10#include <asm/apic.h> 11#include <asm/apic.h>
11#include <asm/io_apic.h> 12#include <asm/io_apic.h>
12#include <asm/irq.h> 13#include <asm/irq.h>
14#include <asm/idle.h>
13 15
14atomic_t irq_err_count; 16atomic_t irq_err_count;
15 17
@@ -36,11 +38,7 @@ void ack_bad_irq(unsigned int irq)
36#endif 38#endif
37} 39}
38 40
39#ifdef CONFIG_X86_32 41#define irq_stats(x) (&per_cpu(irq_stat, x))
40# define irq_stats(x) (&per_cpu(irq_stat, x))
41#else
42# define irq_stats(x) cpu_pda(x)
43#endif
44/* 42/*
45 * /proc/interrupts printing: 43 * /proc/interrupts printing:
46 */ 44 */
@@ -192,4 +190,40 @@ u64 arch_irq_stat(void)
192 return sum; 190 return sum;
193} 191}
194 192
193
194/*
195 * do_IRQ handles all normal device IRQ's (the special
196 * SMP cross-CPU interrupts have their own specific
197 * handlers).
198 */
199unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
200{
201 struct pt_regs *old_regs = set_irq_regs(regs);
202
203 /* high bit used in ret_from_ code */
204 unsigned vector = ~regs->orig_ax;
205 unsigned irq;
206
207 exit_idle();
208 irq_enter();
209
210 irq = __get_cpu_var(vector_irq)[vector];
211
212 if (!handle_irq(irq, regs)) {
213#ifdef CONFIG_X86_64
214 if (!disable_apic)
215 ack_APIC_irq();
216#endif
217
218 if (printk_ratelimit())
219 printk(KERN_EMERG "%s: %d.%d No irq handler for vector (irq %d)\n",
220 __func__, smp_processor_id(), vector, irq);
221 }
222
223 irq_exit();
224
225 set_irq_regs(old_regs);
226 return 1;
227}
228
195EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); 229EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
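With this change and the two irq_32.c/irq_64.c changes that follow, do_IRQ() is shared: it decodes the vector, looks up vector_irq[] and defers to a per-width helper. The contract, stated as a prototype (where it would be declared, e.g. asm/irq.h, is an assumption):

        bool handle_irq(unsigned irq, struct pt_regs *regs);
        /*
         * 32-bit: switch to the per-CPU IRQ stack if needed, then run
         * desc->handle_irq().  64-bit: check for kernel-stack overflow, then
         * generic_handle_irq_desc().  Returning false makes the shared
         * do_IRQ() ack the APIC and rate-limit a "No irq handler" message.
         */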
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 74b9ff7341e9..4beb9a13873d 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -191,33 +191,16 @@ static inline int
191execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; } 191execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
192#endif 192#endif
193 193
194/* 194bool handle_irq(unsigned irq, struct pt_regs *regs)
195 * do_IRQ handles all normal device IRQ's (the special
196 * SMP cross-CPU interrupts have their own specific
197 * handlers).
198 */
199unsigned int do_IRQ(struct pt_regs *regs)
200{ 195{
201 struct pt_regs *old_regs;
202 /* high bit used in ret_from_ code */
203 int overflow;
204 unsigned vector = ~regs->orig_ax;
205 struct irq_desc *desc; 196 struct irq_desc *desc;
206 unsigned irq; 197 int overflow;
207
208
209 old_regs = set_irq_regs(regs);
210 irq_enter();
211 irq = __get_cpu_var(vector_irq)[vector];
212 198
213 overflow = check_stack_overflow(); 199 overflow = check_stack_overflow();
214 200
215 desc = irq_to_desc(irq); 201 desc = irq_to_desc(irq);
216 if (unlikely(!desc)) { 202 if (unlikely(!desc))
217 printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n", 203 return false;
218 __func__, irq, vector, smp_processor_id());
219 BUG();
220 }
221 204
222 if (!execute_on_irq_stack(overflow, desc, irq)) { 205 if (!execute_on_irq_stack(overflow, desc, irq)) {
223 if (unlikely(overflow)) 206 if (unlikely(overflow))
@@ -225,13 +208,11 @@ unsigned int do_IRQ(struct pt_regs *regs)
225 desc->handle_irq(irq, desc); 208 desc->handle_irq(irq, desc);
226 } 209 }
227 210
228 irq_exit(); 211 return true;
229 set_irq_regs(old_regs);
230 return 1;
231} 212}
232 213
233#ifdef CONFIG_HOTPLUG_CPU 214#ifdef CONFIG_HOTPLUG_CPU
234#include <mach_apic.h> 215#include <asm/genapic.h>
235 216
236/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ 217/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
237void fixup_irqs(void) 218void fixup_irqs(void)
@@ -248,7 +229,7 @@ void fixup_irqs(void)
248 if (irq == 2) 229 if (irq == 2)
249 continue; 230 continue;
250 231
251 affinity = &desc->affinity; 232 affinity = desc->affinity;
252 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { 233 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
253 printk("Breaking affinity for irq %i\n", irq); 234 printk("Breaking affinity for irq %i\n", irq);
254 affinity = cpu_all_mask; 235 affinity = cpu_all_mask;
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 63c88e6ec025..977d8b43a0dd 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -18,6 +18,13 @@
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <asm/io_apic.h> 19#include <asm/io_apic.h>
20#include <asm/idle.h> 20#include <asm/idle.h>
21#include <asm/apic.h>
22
23DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
24EXPORT_PER_CPU_SYMBOL(irq_stat);
25
26DEFINE_PER_CPU(struct pt_regs *, irq_regs);
27EXPORT_PER_CPU_SYMBOL(irq_regs);
21 28
22/* 29/*
23 * Probabilistic stack overflow check: 30 * Probabilistic stack overflow check:
@@ -41,42 +48,18 @@ static inline void stack_overflow_check(struct pt_regs *regs)
41#endif 48#endif
42} 49}
43 50
44/* 51bool handle_irq(unsigned irq, struct pt_regs *regs)
45 * do_IRQ handles all normal device IRQ's (the special
46 * SMP cross-CPU interrupts have their own specific
47 * handlers).
48 */
49asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
50{ 52{
51 struct pt_regs *old_regs = set_irq_regs(regs);
52 struct irq_desc *desc; 53 struct irq_desc *desc;
53 54
54 /* high bit used in ret_from_ code */
55 unsigned vector = ~regs->orig_ax;
56 unsigned irq;
57
58 exit_idle();
59 irq_enter();
60 irq = __get_cpu_var(vector_irq)[vector];
61
62 stack_overflow_check(regs); 55 stack_overflow_check(regs);
63 56
64 desc = irq_to_desc(irq); 57 desc = irq_to_desc(irq);
65 if (likely(desc)) 58 if (unlikely(!desc))
66 generic_handle_irq_desc(irq, desc); 59 return false;
67 else {
68 if (!disable_apic)
69 ack_APIC_irq();
70
71 if (printk_ratelimit())
72 printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
73 __func__, smp_processor_id(), vector);
74 }
75
76 irq_exit();
77 60
78 set_irq_regs(old_regs); 61 generic_handle_irq_desc(irq, desc);
79 return 1; 62 return true;
80} 63}
81 64
82#ifdef CONFIG_HOTPLUG_CPU 65#ifdef CONFIG_HOTPLUG_CPU
@@ -100,7 +83,7 @@ void fixup_irqs(void)
100 /* interrupt's are disabled at this point */ 83 /* interrupt's are disabled at this point */
101 spin_lock(&desc->lock); 84 spin_lock(&desc->lock);
102 85
103 affinity = &desc->affinity; 86 affinity = desc->affinity;
104 if (!irq_has_action(irq) || 87 if (!irq_has_action(irq) ||
105 cpumask_equal(affinity, cpu_online_mask)) { 88 cpumask_equal(affinity, cpu_online_mask)) {
106 spin_unlock(&desc->lock); 89 spin_unlock(&desc->lock);
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 10a09c2f1828..bf629cadec1a 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -78,6 +78,15 @@ void __init init_ISA_irqs(void)
78 } 78 }
79} 79}
80 80
81/*
82 * IRQ2 is cascade interrupt to second interrupt controller
83 */
84static struct irqaction irq2 = {
85 .handler = no_action,
86 .mask = CPU_MASK_NONE,
87 .name = "cascade",
88};
89
81DEFINE_PER_CPU(vector_irq_t, vector_irq) = { 90DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
82 [0 ... IRQ0_VECTOR - 1] = -1, 91 [0 ... IRQ0_VECTOR - 1] = -1,
83 [IRQ0_VECTOR] = 0, 92 [IRQ0_VECTOR] = 0,
@@ -140,8 +149,15 @@ void __init native_init_IRQ(void)
140 */ 149 */
141 alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); 150 alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
142 151
143 /* IPI for invalidation */ 152 /* IPIs for invalidation */
144 alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt); 153 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
154 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
155 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
156 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
157 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
158 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
159 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
160 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
145 161
146 /* IPI for generic function call */ 162 /* IPI for generic function call */
147 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); 163 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
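32-bit now registers eight TLB-shootdown vectors, matching the existing 64-bit scheme in which each sender hashes itself onto one slot so that concurrent flushes from different CPUs do not serialize on a single vector. Roughly, on the sending side (a sketch mirroring the 64-bit flush code; names other than the vector base are placeholders):

        unsigned int sender = smp_processor_id() % 8;   /* 8 invalidate vectors    */
        int vector = INVALIDATE_TLB_VECTOR_START + sender;

        apic->send_IPI_mask(flush_mask, vector);        /* flush_mask: placeholder */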
@@ -169,6 +185,9 @@ void __init native_init_IRQ(void)
169 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); 185 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
170#endif 186#endif
171 187
188 if (!acpi_ioapic)
189 setup_irq(2, &irq2);
190
172 /* setup after call gates are initialised (usually add in 191 /* setup after call gates are initialised (usually add in
173 * the architecture specific gates) 192 * the architecture specific gates)
174 */ 193 */
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 10435a120d22..5c4f55483849 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -46,7 +46,7 @@
46#include <asm/apicdef.h> 46#include <asm/apicdef.h>
47#include <asm/system.h> 47#include <asm/system.h>
48 48
49#include <mach_ipi.h> 49#include <asm/genapic.h>
50 50
51/* 51/*
52 * Put the error code here just in case the user cares: 52 * Put the error code here just in case the user cares:
@@ -347,7 +347,7 @@ void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
347 */ 347 */
348void kgdb_roundup_cpus(unsigned long flags) 348void kgdb_roundup_cpus(unsigned long flags)
349{ 349{
350 send_IPI_allbutself(APIC_DM_NMI); 350 apic->send_IPI_allbutself(APIC_DM_NMI);
351} 351}
352#endif 352#endif
353 353
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index b7f4c929e615..5e9f4fc51385 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -87,9 +87,9 @@
87#include <linux/cpu.h> 87#include <linux/cpu.h>
88#include <linux/firmware.h> 88#include <linux/firmware.h>
89#include <linux/platform_device.h> 89#include <linux/platform_device.h>
90#include <linux/uaccess.h>
90 91
91#include <asm/msr.h> 92#include <asm/msr.h>
92#include <asm/uaccess.h>
93#include <asm/processor.h> 93#include <asm/processor.h>
94#include <asm/microcode.h> 94#include <asm/microcode.h>
95 95
@@ -196,7 +196,7 @@ static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf)
196 return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1; 196 return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1;
197} 197}
198 198
199static inline int 199static inline int
200update_match_revision(struct microcode_header_intel *mc_header, int rev) 200update_match_revision(struct microcode_header_intel *mc_header, int rev)
201{ 201{
202 return (mc_header->rev <= rev) ? 0 : 1; 202 return (mc_header->rev <= rev) ? 0 : 1;
@@ -442,8 +442,8 @@ static int request_microcode_fw(int cpu, struct device *device)
442 return ret; 442 return ret;
443 } 443 }
444 444
445 ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size, 445 ret = generic_load_microcode(cpu, (void *)firmware->data,
446 &get_ucode_fw); 446 firmware->size, &get_ucode_fw);
447 447
448 release_firmware(firmware); 448 release_firmware(firmware);
449 449
@@ -460,7 +460,7 @@ static int request_microcode_user(int cpu, const void __user *buf, size_t size)
460 /* We should bind the task to the CPU */ 460 /* We should bind the task to the CPU */
461 BUG_ON(cpu != raw_smp_processor_id()); 461 BUG_ON(cpu != raw_smp_processor_id());
462 462
463 return generic_load_microcode(cpu, (void*)buf, size, &get_ucode_user); 463 return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
464} 464}
465 465
466static void microcode_fini_cpu(int cpu) 466static void microcode_fini_cpu(int cpu)
diff --git a/arch/x86/kernel/module_32.c b/arch/x86/kernel/module_32.c
index 3db0a5442eb1..0edd819050e7 100644
--- a/arch/x86/kernel/module_32.c
+++ b/arch/x86/kernel/module_32.c
@@ -42,7 +42,7 @@ void module_free(struct module *mod, void *module_region)
42{ 42{
43 vfree(module_region); 43 vfree(module_region);
44 /* FIXME: If module_region == mod->init_region, trim exception 44 /* FIXME: If module_region == mod->init_region, trim exception
45 table entries. */ 45 table entries. */
46} 46}
47 47
48/* We don't need anything special. */ 48/* We don't need anything special. */
@@ -113,13 +113,13 @@ int module_finalize(const Elf_Ehdr *hdr,
113 *para = NULL; 113 *para = NULL;
114 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; 114 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
115 115
116 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { 116 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
117 if (!strcmp(".text", secstrings + s->sh_name)) 117 if (!strcmp(".text", secstrings + s->sh_name))
118 text = s; 118 text = s;
119 if (!strcmp(".altinstructions", secstrings + s->sh_name)) 119 if (!strcmp(".altinstructions", secstrings + s->sh_name))
120 alt = s; 120 alt = s;
121 if (!strcmp(".smp_locks", secstrings + s->sh_name)) 121 if (!strcmp(".smp_locks", secstrings + s->sh_name))
122 locks= s; 122 locks = s;
123 if (!strcmp(".parainstructions", secstrings + s->sh_name)) 123 if (!strcmp(".parainstructions", secstrings + s->sh_name))
124 para = s; 124 para = s;
125 } 125 }
diff --git a/arch/x86/kernel/module_64.c b/arch/x86/kernel/module_64.c
index 6ba87830d4b1..c23880b90b5c 100644
--- a/arch/x86/kernel/module_64.c
+++ b/arch/x86/kernel/module_64.c
@@ -30,14 +30,14 @@
30#include <asm/page.h> 30#include <asm/page.h>
31#include <asm/pgtable.h> 31#include <asm/pgtable.h>
32 32
33#define DEBUGP(fmt...) 33#define DEBUGP(fmt...)
34 34
35#ifndef CONFIG_UML 35#ifndef CONFIG_UML
36void module_free(struct module *mod, void *module_region) 36void module_free(struct module *mod, void *module_region)
37{ 37{
38 vfree(module_region); 38 vfree(module_region);
39 /* FIXME: If module_region == mod->init_region, trim exception 39 /* FIXME: If module_region == mod->init_region, trim exception
40 table entries. */ 40 table entries. */
41} 41}
42 42
43void *module_alloc(unsigned long size) 43void *module_alloc(unsigned long size)
@@ -77,7 +77,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
77 Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr; 77 Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
78 Elf64_Sym *sym; 78 Elf64_Sym *sym;
79 void *loc; 79 void *loc;
80 u64 val; 80 u64 val;
81 81
82 DEBUGP("Applying relocate section %u to %u\n", relsec, 82 DEBUGP("Applying relocate section %u to %u\n", relsec,
83 sechdrs[relsec].sh_info); 83 sechdrs[relsec].sh_info);
@@ -91,11 +91,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
91 sym = (Elf64_Sym *)sechdrs[symindex].sh_addr 91 sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
92 + ELF64_R_SYM(rel[i].r_info); 92 + ELF64_R_SYM(rel[i].r_info);
93 93
94 DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n", 94 DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
95 (int)ELF64_R_TYPE(rel[i].r_info), 95 (int)ELF64_R_TYPE(rel[i].r_info),
96 sym->st_value, rel[i].r_addend, (u64)loc); 96 sym->st_value, rel[i].r_addend, (u64)loc);
97 97
98 val = sym->st_value + rel[i].r_addend; 98 val = sym->st_value + rel[i].r_addend;
99 99
100 switch (ELF64_R_TYPE(rel[i].r_info)) { 100 switch (ELF64_R_TYPE(rel[i].r_info)) {
101 case R_X86_64_NONE: 101 case R_X86_64_NONE:
@@ -113,16 +113,16 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
113 if ((s64)val != *(s32 *)loc) 113 if ((s64)val != *(s32 *)loc)
114 goto overflow; 114 goto overflow;
115 break; 115 break;
116 case R_X86_64_PC32: 116 case R_X86_64_PC32:
117 val -= (u64)loc; 117 val -= (u64)loc;
118 *(u32 *)loc = val; 118 *(u32 *)loc = val;
119#if 0 119#if 0
120 if ((s64)val != *(s32 *)loc) 120 if ((s64)val != *(s32 *)loc)
121 goto overflow; 121 goto overflow;
122#endif 122#endif
123 break; 123 break;
124 default: 124 default:
125 printk(KERN_ERR "module %s: Unknown rela relocation: %Lu\n", 125 printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n",
126 me->name, ELF64_R_TYPE(rel[i].r_info)); 126 me->name, ELF64_R_TYPE(rel[i].r_info));
127 return -ENOEXEC; 127 return -ENOEXEC;
128 } 128 }
@@ -130,7 +130,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
130 return 0; 130 return 0;
131 131
132overflow: 132overflow:
133 printk(KERN_ERR "overflow in relocation type %d val %Lx\n", 133 printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
134 (int)ELF64_R_TYPE(rel[i].r_info), val); 134 (int)ELF64_R_TYPE(rel[i].r_info), val);
135 printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n", 135 printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n",
136 me->name); 136 me->name);
@@ -143,13 +143,13 @@ int apply_relocate(Elf_Shdr *sechdrs,
143 unsigned int relsec, 143 unsigned int relsec,
144 struct module *me) 144 struct module *me)
145{ 145{
146 printk("non add relocation not supported\n"); 146 printk(KERN_ERR "non add relocation not supported\n");
147 return -ENOSYS; 147 return -ENOSYS;
148} 148}
149 149
150int module_finalize(const Elf_Ehdr *hdr, 150int module_finalize(const Elf_Ehdr *hdr,
151 const Elf_Shdr *sechdrs, 151 const Elf_Shdr *sechdrs,
152 struct module *me) 152 struct module *me)
153{ 153{
154 const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL, 154 const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
155 *para = NULL; 155 *para = NULL;
@@ -161,7 +161,7 @@ int module_finalize(const Elf_Ehdr *hdr,
161 if (!strcmp(".altinstructions", secstrings + s->sh_name)) 161 if (!strcmp(".altinstructions", secstrings + s->sh_name))
162 alt = s; 162 alt = s;
163 if (!strcmp(".smp_locks", secstrings + s->sh_name)) 163 if (!strcmp(".smp_locks", secstrings + s->sh_name))
164 locks= s; 164 locks = s;
165 if (!strcmp(".parainstructions", secstrings + s->sh_name)) 165 if (!strcmp(".parainstructions", secstrings + s->sh_name))
166 para = s; 166 para = s;
167 } 167 }
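
For reference, the relocation arithmetic handled in apply_relocate_add() above reduces to two ABI formulas: absolute relocations store S + A, PC-relative ones store S + A - P. A minimal standalone sketch of that math (plain C, illustrative names, not a drop-in for the kernel function):

#include <stdint.h>

/* S = symbol value, A = addend, P = address of the location being patched */
static inline uint64_t reloc_abs(uint64_t sym, int64_t addend)
{
        return sym + addend;                    /* R_X86_64_64 / _32 / _32S */
}

static inline uint32_t reloc_pc32(uint64_t sym, int64_t addend, uint64_t loc)
{
        return (uint32_t)(sym + addend - loc);  /* R_X86_64_PC32 */
}

The 32-bit absolute cases additionally verify that the truncated value still extends back to the original 64-bit result, which is what the overflow checks in the hunk implement.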
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index a649a4ccad43..200764453195 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -3,7 +3,7 @@
3 * compliant MP-table parsing routines. 3 * compliant MP-table parsing routines.
4 * 4 *
5 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk> 5 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
6 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> 6 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
7 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de> 7 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
8 */ 8 */
9 9
@@ -29,12 +29,7 @@
29#include <asm/setup.h> 29#include <asm/setup.h>
30#include <asm/smp.h> 30#include <asm/smp.h>
31 31
32#include <mach_apic.h> 32#include <asm/genapic.h>
33#ifdef CONFIG_X86_32
34#include <mach_apicdef.h>
35#include <mach_mpparse.h>
36#endif
37
38/* 33/*
39 * Checksum an MP configuration block. 34 * Checksum an MP configuration block.
40 */ 35 */
@@ -144,11 +139,11 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m)
144 if (bad_ioapic(m->apicaddr)) 139 if (bad_ioapic(m->apicaddr))
145 return; 140 return;
146 141
147 mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr; 142 mp_ioapics[nr_ioapics].apicaddr = m->apicaddr;
148 mp_ioapics[nr_ioapics].mp_apicid = m->apicid; 143 mp_ioapics[nr_ioapics].apicid = m->apicid;
149 mp_ioapics[nr_ioapics].mp_type = m->type; 144 mp_ioapics[nr_ioapics].type = m->type;
150 mp_ioapics[nr_ioapics].mp_apicver = m->apicver; 145 mp_ioapics[nr_ioapics].apicver = m->apicver;
151 mp_ioapics[nr_ioapics].mp_flags = m->flags; 146 mp_ioapics[nr_ioapics].flags = m->flags;
152 nr_ioapics++; 147 nr_ioapics++;
153} 148}
154 149
@@ -160,55 +155,55 @@ static void print_MP_intsrc_info(struct mpc_intsrc *m)
160 m->srcbusirq, m->dstapic, m->dstirq); 155 m->srcbusirq, m->dstapic, m->dstirq);
161} 156}
162 157
163static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq) 158static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
164{ 159{
165 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," 160 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
166 " IRQ %02x, APIC ID %x, APIC INT %02x\n", 161 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
167 mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3, 162 mp_irq->irqtype, mp_irq->irqflag & 3,
168 (mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus, 163 (mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
169 mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq); 164 mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
170} 165}
171 166
172static void __init assign_to_mp_irq(struct mpc_intsrc *m, 167static void __init assign_to_mp_irq(struct mpc_intsrc *m,
173 struct mp_config_intsrc *mp_irq) 168 struct mpc_intsrc *mp_irq)
174{ 169{
175 mp_irq->mp_dstapic = m->dstapic; 170 mp_irq->dstapic = m->dstapic;
176 mp_irq->mp_type = m->type; 171 mp_irq->type = m->type;
177 mp_irq->mp_irqtype = m->irqtype; 172 mp_irq->irqtype = m->irqtype;
178 mp_irq->mp_irqflag = m->irqflag; 173 mp_irq->irqflag = m->irqflag;
179 mp_irq->mp_srcbus = m->srcbus; 174 mp_irq->srcbus = m->srcbus;
180 mp_irq->mp_srcbusirq = m->srcbusirq; 175 mp_irq->srcbusirq = m->srcbusirq;
181 mp_irq->mp_dstirq = m->dstirq; 176 mp_irq->dstirq = m->dstirq;
182} 177}
183 178
184static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq, 179static void __init assign_to_mpc_intsrc(struct mpc_intsrc *mp_irq,
185 struct mpc_intsrc *m) 180 struct mpc_intsrc *m)
186{ 181{
187 m->dstapic = mp_irq->mp_dstapic; 182 m->dstapic = mp_irq->dstapic;
188 m->type = mp_irq->mp_type; 183 m->type = mp_irq->type;
189 m->irqtype = mp_irq->mp_irqtype; 184 m->irqtype = mp_irq->irqtype;
190 m->irqflag = mp_irq->mp_irqflag; 185 m->irqflag = mp_irq->irqflag;
191 m->srcbus = mp_irq->mp_srcbus; 186 m->srcbus = mp_irq->srcbus;
192 m->srcbusirq = mp_irq->mp_srcbusirq; 187 m->srcbusirq = mp_irq->srcbusirq;
193 m->dstirq = mp_irq->mp_dstirq; 188 m->dstirq = mp_irq->dstirq;
194} 189}
195 190
196static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq, 191static int __init mp_irq_mpc_intsrc_cmp(struct mpc_intsrc *mp_irq,
197 struct mpc_intsrc *m) 192 struct mpc_intsrc *m)
198{ 193{
199 if (mp_irq->mp_dstapic != m->dstapic) 194 if (mp_irq->dstapic != m->dstapic)
200 return 1; 195 return 1;
201 if (mp_irq->mp_type != m->type) 196 if (mp_irq->type != m->type)
202 return 2; 197 return 2;
203 if (mp_irq->mp_irqtype != m->irqtype) 198 if (mp_irq->irqtype != m->irqtype)
204 return 3; 199 return 3;
205 if (mp_irq->mp_irqflag != m->irqflag) 200 if (mp_irq->irqflag != m->irqflag)
206 return 4; 201 return 4;
207 if (mp_irq->mp_srcbus != m->srcbus) 202 if (mp_irq->srcbus != m->srcbus)
208 return 5; 203 return 5;
209 if (mp_irq->mp_srcbusirq != m->srcbusirq) 204 if (mp_irq->srcbusirq != m->srcbusirq)
210 return 6; 205 return 6;
211 if (mp_irq->mp_dstirq != m->dstirq) 206 if (mp_irq->dstirq != m->dstirq)
212 return 7; 207 return 7;
213 208
214 return 0; 209 return 0;
@@ -292,16 +287,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
292 return 0; 287 return 0;
293 288
294#ifdef CONFIG_X86_32 289#ifdef CONFIG_X86_32
295 /* 290 generic_mps_oem_check(mpc, oem, str);
296 * need to make sure summit and es7000's mps_oem_check is safe to be
297 * called early via genericarch 's mps_oem_check
298 */
299 if (early) {
300#ifdef CONFIG_X86_NUMAQ
301 numaq_mps_oem_check(mpc, oem, str);
302#endif
303 } else
304 mps_oem_check(mpc, oem, str);
305#endif 291#endif
306 /* save the local APIC address, it might be non-default */ 292 /* save the local APIC address, it might be non-default */
307 if (!acpi_lapic) 293 if (!acpi_lapic)
@@ -386,13 +372,13 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
386 (*x86_quirks->mpc_record)++; 372 (*x86_quirks->mpc_record)++;
387 } 373 }
388 374
389#ifdef CONFIG_X86_GENERICARCH 375#ifdef CONFIG_X86_BIGSMP
390 generic_bigsmp_probe(); 376 generic_bigsmp_probe();
391#endif 377#endif
392 378
393#ifdef CONFIG_X86_32 379 if (apic->setup_apic_routing)
394 setup_apic_routing(); 380 apic->setup_apic_routing();
395#endif 381
396 if (!num_processors) 382 if (!num_processors)
397 printk(KERN_ERR "MPTABLE: no processors registered!\n"); 383 printk(KERN_ERR "MPTABLE: no processors registered!\n");
398 return num_processors; 384 return num_processors;
@@ -417,7 +403,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
417 intsrc.type = MP_INTSRC; 403 intsrc.type = MP_INTSRC;
418 intsrc.irqflag = 0; /* conforming */ 404 intsrc.irqflag = 0; /* conforming */
419 intsrc.srcbus = 0; 405 intsrc.srcbus = 0;
420 intsrc.dstapic = mp_ioapics[0].mp_apicid; 406 intsrc.dstapic = mp_ioapics[0].apicid;
421 407
422 intsrc.irqtype = mp_INT; 408 intsrc.irqtype = mp_INT;
423 409
@@ -570,14 +556,14 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
570 } 556 }
571} 557}
572 558
573static struct intel_mp_floating *mpf_found; 559static struct mpf_intel *mpf_found;
574 560
575/* 561/*
576 * Scan the memory blocks for an SMP configuration block. 562 * Scan the memory blocks for an SMP configuration block.
577 */ 563 */
578static void __init __get_smp_config(unsigned int early) 564static void __init __get_smp_config(unsigned int early)
579{ 565{
580 struct intel_mp_floating *mpf = mpf_found; 566 struct mpf_intel *mpf = mpf_found;
581 567
582 if (!mpf) 568 if (!mpf)
583 return; 569 return;
@@ -598,9 +584,9 @@ static void __init __get_smp_config(unsigned int early)
598 } 584 }
599 585
600 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", 586 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
601 mpf->mpf_specification); 587 mpf->specification);
602#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) 588#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
603 if (mpf->mpf_feature2 & (1 << 7)) { 589 if (mpf->feature2 & (1 << 7)) {
604 printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); 590 printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
605 pic_mode = 1; 591 pic_mode = 1;
606 } else { 592 } else {
@@ -611,7 +597,7 @@ static void __init __get_smp_config(unsigned int early)
611 /* 597 /*
612 * Now see if we need to read further. 598 * Now see if we need to read further.
613 */ 599 */
614 if (mpf->mpf_feature1 != 0) { 600 if (mpf->feature1 != 0) {
615 if (early) { 601 if (early) {
616 /* 602 /*
617 * local APIC has default address 603 * local APIC has default address
@@ -621,16 +607,16 @@ static void __init __get_smp_config(unsigned int early)
621 } 607 }
622 608
623 printk(KERN_INFO "Default MP configuration #%d\n", 609 printk(KERN_INFO "Default MP configuration #%d\n",
624 mpf->mpf_feature1); 610 mpf->feature1);
625 construct_default_ISA_mptable(mpf->mpf_feature1); 611 construct_default_ISA_mptable(mpf->feature1);
626 612
627 } else if (mpf->mpf_physptr) { 613 } else if (mpf->physptr) {
628 614
629 /* 615 /*
630 * Read the physical hardware table. Anything here will 616 * Read the physical hardware table. Anything here will
631 * override the defaults. 617 * override the defaults.
632 */ 618 */
633 if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) { 619 if (!smp_read_mpc(phys_to_virt(mpf->physptr), early)) {
634#ifdef CONFIG_X86_LOCAL_APIC 620#ifdef CONFIG_X86_LOCAL_APIC
635 smp_found_config = 0; 621 smp_found_config = 0;
636#endif 622#endif
@@ -688,32 +674,32 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
688 unsigned reserve) 674 unsigned reserve)
689{ 675{
690 unsigned int *bp = phys_to_virt(base); 676 unsigned int *bp = phys_to_virt(base);
691 struct intel_mp_floating *mpf; 677 struct mpf_intel *mpf;
692 678
693 apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n", 679 apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
694 bp, length); 680 bp, length);
695 BUILD_BUG_ON(sizeof(*mpf) != 16); 681 BUILD_BUG_ON(sizeof(*mpf) != 16);
696 682
697 while (length > 0) { 683 while (length > 0) {
698 mpf = (struct intel_mp_floating *)bp; 684 mpf = (struct mpf_intel *)bp;
699 if ((*bp == SMP_MAGIC_IDENT) && 685 if ((*bp == SMP_MAGIC_IDENT) &&
700 (mpf->mpf_length == 1) && 686 (mpf->length == 1) &&
701 !mpf_checksum((unsigned char *)bp, 16) && 687 !mpf_checksum((unsigned char *)bp, 16) &&
702 ((mpf->mpf_specification == 1) 688 ((mpf->specification == 1)
703 || (mpf->mpf_specification == 4))) { 689 || (mpf->specification == 4))) {
704#ifdef CONFIG_X86_LOCAL_APIC 690#ifdef CONFIG_X86_LOCAL_APIC
705 smp_found_config = 1; 691 smp_found_config = 1;
706#endif 692#endif
707 mpf_found = mpf; 693 mpf_found = mpf;
708 694
709 printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n", 695 printk(KERN_INFO "found SMP MP-table at [%p] %llx\n",
710 mpf, virt_to_phys(mpf)); 696 mpf, (u64)virt_to_phys(mpf));
711 697
712 if (!reserve) 698 if (!reserve)
713 return 1; 699 return 1;
714 reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE, 700 reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE,
715 BOOTMEM_DEFAULT); 701 BOOTMEM_DEFAULT);
716 if (mpf->mpf_physptr) { 702 if (mpf->physptr) {
717 unsigned long size = PAGE_SIZE; 703 unsigned long size = PAGE_SIZE;
718#ifdef CONFIG_X86_32 704#ifdef CONFIG_X86_32
719 /* 705 /*
@@ -722,14 +708,14 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
722 * the bottom is mapped now. 708 * the bottom is mapped now.
723 * PC-9800's MPC table places on the very last 709 * PC-9800's MPC table places on the very last
724 * of physical memory; so that simply reserving 710 * of physical memory; so that simply reserving
725 * PAGE_SIZE from mpg->mpf_physptr yields BUG() 711 * PAGE_SIZE from mpf->physptr yields BUG()
726 * in reserve_bootmem. 712 * in reserve_bootmem.
727 */ 713 */
728 unsigned long end = max_low_pfn * PAGE_SIZE; 714 unsigned long end = max_low_pfn * PAGE_SIZE;
729 if (mpf->mpf_physptr + size > end) 715 if (mpf->physptr + size > end)
730 size = end - mpf->mpf_physptr; 716 size = end - mpf->physptr;
731#endif 717#endif
732 reserve_bootmem_generic(mpf->mpf_physptr, size, 718 reserve_bootmem_generic(mpf->physptr, size,
733 BOOTMEM_DEFAULT); 719 BOOTMEM_DEFAULT);
734 } 720 }
735 721
@@ -809,15 +795,15 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
809 /* not legacy */ 795 /* not legacy */
810 796
811 for (i = 0; i < mp_irq_entries; i++) { 797 for (i = 0; i < mp_irq_entries; i++) {
812 if (mp_irqs[i].mp_irqtype != mp_INT) 798 if (mp_irqs[i].irqtype != mp_INT)
813 continue; 799 continue;
814 800
815 if (mp_irqs[i].mp_irqflag != 0x0f) 801 if (mp_irqs[i].irqflag != 0x0f)
816 continue; 802 continue;
817 803
818 if (mp_irqs[i].mp_srcbus != m->srcbus) 804 if (mp_irqs[i].srcbus != m->srcbus)
819 continue; 805 continue;
820 if (mp_irqs[i].mp_srcbusirq != m->srcbusirq) 806 if (mp_irqs[i].srcbusirq != m->srcbusirq)
821 continue; 807 continue;
822 if (irq_used[i]) { 808 if (irq_used[i]) {
823 /* already claimed */ 809 /* already claimed */
@@ -922,10 +908,10 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
922 if (irq_used[i]) 908 if (irq_used[i])
923 continue; 909 continue;
924 910
925 if (mp_irqs[i].mp_irqtype != mp_INT) 911 if (mp_irqs[i].irqtype != mp_INT)
926 continue; 912 continue;
927 913
928 if (mp_irqs[i].mp_irqflag != 0x0f) 914 if (mp_irqs[i].irqflag != 0x0f)
929 continue; 915 continue;
930 916
931 if (nr_m_spare > 0) { 917 if (nr_m_spare > 0) {
@@ -1001,7 +987,7 @@ static int __init update_mp_table(void)
1001{ 987{
1002 char str[16]; 988 char str[16];
1003 char oem[10]; 989 char oem[10];
1004 struct intel_mp_floating *mpf; 990 struct mpf_intel *mpf;
1005 struct mpc_table *mpc, *mpc_new; 991 struct mpc_table *mpc, *mpc_new;
1006 992
1007 if (!enable_update_mptable) 993 if (!enable_update_mptable)
@@ -1014,19 +1000,19 @@ static int __init update_mp_table(void)
1014 /* 1000 /*
1015 * Now see if we need to go further. 1001 * Now see if we need to go further.
1016 */ 1002 */
1017 if (mpf->mpf_feature1 != 0) 1003 if (mpf->feature1 != 0)
1018 return 0; 1004 return 0;
1019 1005
1020 if (!mpf->mpf_physptr) 1006 if (!mpf->physptr)
1021 return 0; 1007 return 0;
1022 1008
1023 mpc = phys_to_virt(mpf->mpf_physptr); 1009 mpc = phys_to_virt(mpf->physptr);
1024 1010
1025 if (!smp_check_mpc(mpc, oem, str)) 1011 if (!smp_check_mpc(mpc, oem, str))
1026 return 0; 1012 return 0;
1027 1013
1028 printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf)); 1014 printk(KERN_INFO "mpf: %llx\n", (u64)virt_to_phys(mpf));
1029 printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr); 1015 printk(KERN_INFO "physptr: %x\n", mpf->physptr);
1030 1016
1031 if (mpc_new_phys && mpc->length > mpc_new_length) { 1017 if (mpc_new_phys && mpc->length > mpc_new_length) {
1032 mpc_new_phys = 0; 1018 mpc_new_phys = 0;
@@ -1047,23 +1033,23 @@ static int __init update_mp_table(void)
1047 } 1033 }
1048 	printk(KERN_INFO "use in-position replacing\n"); 1034 	printk(KERN_INFO "use in-position replacing\n");
1049 } else { 1035 } else {
1050 mpf->mpf_physptr = mpc_new_phys; 1036 mpf->physptr = mpc_new_phys;
1051 mpc_new = phys_to_virt(mpc_new_phys); 1037 mpc_new = phys_to_virt(mpc_new_phys);
1052 memcpy(mpc_new, mpc, mpc->length); 1038 memcpy(mpc_new, mpc, mpc->length);
1053 mpc = mpc_new; 1039 mpc = mpc_new;
1054 /* check if we can modify that */ 1040 /* check if we can modify that */
1055 if (mpc_new_phys - mpf->mpf_physptr) { 1041 if (mpc_new_phys - mpf->physptr) {
1056 struct intel_mp_floating *mpf_new; 1042 struct mpf_intel *mpf_new;
1057 /* steal 16 bytes from [0, 1k) */ 1043 /* steal 16 bytes from [0, 1k) */
1058 printk(KERN_INFO "mpf new: %x\n", 0x400 - 16); 1044 printk(KERN_INFO "mpf new: %x\n", 0x400 - 16);
1059 mpf_new = phys_to_virt(0x400 - 16); 1045 mpf_new = phys_to_virt(0x400 - 16);
1060 memcpy(mpf_new, mpf, 16); 1046 memcpy(mpf_new, mpf, 16);
1061 mpf = mpf_new; 1047 mpf = mpf_new;
1062 mpf->mpf_physptr = mpc_new_phys; 1048 mpf->physptr = mpc_new_phys;
1063 } 1049 }
1064 mpf->mpf_checksum = 0; 1050 mpf->checksum = 0;
1065 mpf->mpf_checksum -= mpf_checksum((unsigned char *)mpf, 16); 1051 mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
1066 printk(KERN_INFO "mpf_physptr new: %x\n", mpf->mpf_physptr); 1052 printk(KERN_INFO "physptr new: %x\n", mpf->physptr);
1067 } 1053 }
1068 1054
1069 /* 1055 /*
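
The floating-pointer scan above accepts a candidate only if the SMP_MAGIC_IDENT signature, the length field, the MP-spec revision (1 or 4) and a byte-wise checksum all agree. A minimal sketch of that checksum rule, assuming the usual MP-spec convention that the 16 bytes of the structure must sum to zero modulo 256 (illustrative code, not the kernel's mpf_checksum()):

#include <stdint.h>

static int mpf_block_valid(const uint8_t *mpf, unsigned int len)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < len; i++)
                sum += mpf[i];          /* wraps naturally at 256 */

        return sum == 0;                /* non-zero sum: reject the block */
}

This is also why update_mp_table() above zeroes mpf->checksum and then subtracts the sum of the remaining bytes after rewriting the physptr field.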
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 726266695b2c..3cf3413ec626 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -35,10 +35,10 @@
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/cpu.h> 36#include <linux/cpu.h>
37#include <linux/notifier.h> 37#include <linux/notifier.h>
38#include <linux/uaccess.h>
38 39
39#include <asm/processor.h> 40#include <asm/processor.h>
40#include <asm/msr.h> 41#include <asm/msr.h>
41#include <asm/uaccess.h>
42#include <asm/system.h> 42#include <asm/system.h>
43 43
44static struct class *msr_class; 44static struct class *msr_class;
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 7228979f1e7f..bdfad80c3cf1 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -34,7 +34,7 @@
34 34
35#include <asm/mce.h> 35#include <asm/mce.h>
36 36
37#include <mach_traps.h> 37#include <asm/mach_traps.h>
38 38
39int unknown_nmi_panic; 39int unknown_nmi_panic;
40int nmi_watchdog_enabled; 40int nmi_watchdog_enabled;
@@ -61,11 +61,7 @@ static int endflag __initdata;
61 61
62static inline unsigned int get_nmi_count(int cpu) 62static inline unsigned int get_nmi_count(int cpu)
63{ 63{
64#ifdef CONFIG_X86_64 64 return per_cpu(irq_stat, cpu).__nmi_count;
65 return cpu_pda(cpu)->__nmi_count;
66#else
67 return nmi_count(cpu);
68#endif
69} 65}
70 66
71static inline int mce_in_progress(void) 67static inline int mce_in_progress(void)
@@ -82,12 +78,8 @@ static inline int mce_in_progress(void)
82 */ 78 */
83static inline unsigned int get_timer_irqs(int cpu) 79static inline unsigned int get_timer_irqs(int cpu)
84{ 80{
85#ifdef CONFIG_X86_64
86 return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
87#else
88 return per_cpu(irq_stat, cpu).apic_timer_irqs + 81 return per_cpu(irq_stat, cpu).apic_timer_irqs +
89 per_cpu(irq_stat, cpu).irq0_irqs; 82 per_cpu(irq_stat, cpu).irq0_irqs;
90#endif
91} 83}
92 84
93#ifdef CONFIG_SMP 85#ifdef CONFIG_SMP
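
Both helpers above now read the same per-CPU statistics structure on 32-bit and 64-bit instead of going through the 64-bit PDA. A small sketch of the resulting accessor pattern (field names as used in the hunk, assumed to live in irq_stat; kernel context assumed):

/* illustrative helper: read the three counters used above for one CPU */
static unsigned int cpu_irq_activity(int cpu)
{
        return per_cpu(irq_stat, cpu).__nmi_count +
               per_cpu(irq_stat, cpu).apic_timer_irqs +
               per_cpu(irq_stat, cpu).irq0_irqs;
}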
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c
index f2191d4f2717..0cc41a1d2550 100644
--- a/arch/x86/kernel/numaq_32.c
+++ b/arch/x86/kernel/numaq_32.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2002, IBM Corp. 4 * Copyright (C) 2002, IBM Corp.
5 * 5 *
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -23,17 +23,18 @@
23 * Send feedback to <gone@us.ibm.com> 23 * Send feedback to <gone@us.ibm.com>
24 */ 24 */
25 25
26#include <linux/mm.h> 26#include <linux/nodemask.h>
27#include <linux/bootmem.h> 27#include <linux/bootmem.h>
28#include <linux/mmzone.h> 28#include <linux/mmzone.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/nodemask.h> 30#include <linux/mm.h>
31#include <asm/numaq.h> 31
32#include <asm/topology.h>
33#include <asm/processor.h> 32#include <asm/processor.h>
33#include <asm/topology.h>
34#include <asm/genapic.h> 34#include <asm/genapic.h>
35#include <asm/e820.h> 35#include <asm/numaq.h>
36#include <asm/setup.h> 36#include <asm/setup.h>
37#include <asm/e820.h>
37 38
38#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT)) 39#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT))
39 40
@@ -91,19 +92,20 @@ static int __init numaq_pre_time_init(void)
91} 92}
92 93
93int found_numaq; 94int found_numaq;
95
94/* 96/*
95 * Have to match translation table entries to main table entries by counter 97 * Have to match translation table entries to main table entries by counter
96 * hence the mpc_record variable .... can't see a less disgusting way of 98 * hence the mpc_record variable .... can't see a less disgusting way of
97 * doing this .... 99 * doing this ....
98 */ 100 */
99struct mpc_config_translation { 101struct mpc_config_translation {
100 unsigned char mpc_type; 102 unsigned char mpc_type;
101 unsigned char trans_len; 103 unsigned char trans_len;
102 unsigned char trans_type; 104 unsigned char trans_type;
103 unsigned char trans_quad; 105 unsigned char trans_quad;
104 unsigned char trans_global; 106 unsigned char trans_global;
105 unsigned char trans_local; 107 unsigned char trans_local;
106 unsigned short trans_reserved; 108 unsigned short trans_reserved;
107}; 109};
108 110
109/* x86_quirks member */ 111/* x86_quirks member */
@@ -236,7 +238,7 @@ static int __init numaq_setup_ioapic_ids(void)
236 238
237static int __init numaq_update_genapic(void) 239static int __init numaq_update_genapic(void)
238{ 240{
239 genapic->wakeup_cpu = wakeup_secondary_cpu_via_nmi; 241 apic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
240 242
241 return 0; 243 return 0;
242} 244}
@@ -291,3 +293,280 @@ int __init get_memcfg_numaq(void)
291 smp_dump_qct(); 293 smp_dump_qct();
292 return 1; 294 return 1;
293} 295}
296
297/*
298 * APIC driver for the IBM NUMAQ chipset.
299 */
300#define APIC_DEFINITION 1
301#include <linux/threads.h>
302#include <linux/cpumask.h>
303#include <asm/mpspec.h>
304#include <asm/genapic.h>
305#include <asm/fixmap.h>
306#include <asm/apicdef.h>
307#include <asm/ipi.h>
308#include <linux/kernel.h>
309#include <linux/string.h>
310#include <linux/init.h>
311#include <linux/numa.h>
312#include <linux/smp.h>
313#include <asm/numaq.h>
314#include <asm/io.h>
315#include <linux/mmzone.h>
316#include <linux/nodemask.h>
317
318#define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
319
320static inline unsigned int numaq_get_apic_id(unsigned long x)
321{
322 return (x >> 24) & 0x0F;
323}
324
325static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector)
326{
327 default_send_IPI_mask_sequence_logical(mask, vector);
328}
329
330static inline void numaq_send_IPI_allbutself(int vector)
331{
332 default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector);
333}
334
335static inline void numaq_send_IPI_all(int vector)
336{
337 numaq_send_IPI_mask(cpu_online_mask, vector);
338}
339
340extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
341
342#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8)
343#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa)
344
345/*
346 * Because we use NMIs rather than the INIT-STARTUP sequence to
347 * bootstrap the CPUs, the APIC may be in a weird state. Kick it:
348 */
349static inline void numaq_smp_callin_clear_local_apic(void)
350{
351 clear_local_APIC();
352}
353
354static inline void
355numaq_store_NMI_vector(unsigned short *high, unsigned short *low)
356{
357 printk("Storing NMI vector\n");
358 *high =
359 *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_HIGH));
360 *low =
361 *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW));
362}
363
364static inline const cpumask_t *numaq_target_cpus(void)
365{
366 return &CPU_MASK_ALL;
367}
368
369static inline unsigned long
370numaq_check_apicid_used(physid_mask_t bitmap, int apicid)
371{
372 return physid_isset(apicid, bitmap);
373}
374
375static inline unsigned long numaq_check_apicid_present(int bit)
376{
377 return physid_isset(bit, phys_cpu_present_map);
378}
379
380#define apicid_cluster(apicid) (apicid & 0xF0)
381
382static inline int numaq_apic_id_registered(void)
383{
384 return 1;
385}
386
387static inline void numaq_init_apic_ldr(void)
388{
389 /* Already done in NUMA-Q firmware */
390}
391
392static inline void numaq_setup_apic_routing(void)
393{
394 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
395 "NUMA-Q", nr_ioapics);
396}
397
398/*
399 * Skip adding the timer int on secondary nodes, which causes
400 * a small but painful rift in the time-space continuum.
401 */
402static inline int numaq_multi_timer_check(int apic, int irq)
403{
404 return apic != 0 && irq == 0;
405}
406
407static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map)
408{
409 /* We don't have a good way to do this yet - hack */
410 return physids_promote(0xFUL);
411}
412
413/* Mapping from cpu number to logical apicid */
414extern u8 cpu_2_logical_apicid[];
415
416static inline int numaq_cpu_to_logical_apicid(int cpu)
417{
418 if (cpu >= nr_cpu_ids)
419 return BAD_APICID;
420 return (int)cpu_2_logical_apicid[cpu];
421}
422
423/*
424 * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
425 * cpu to APIC ID relation to properly interact with the intelligent
426 * mode of the cluster controller.
427 */
428static inline int numaq_cpu_present_to_apicid(int mps_cpu)
429{
430 if (mps_cpu < 60)
431 return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
432 else
433 return BAD_APICID;
434}
435
436static inline int numaq_apicid_to_node(int logical_apicid)
437{
438 return logical_apicid >> 4;
439}
440
441static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid)
442{
443 int node = numaq_apicid_to_node(logical_apicid);
444 int cpu = __ffs(logical_apicid & 0xf);
445
446 return physid_mask_of_physid(cpu + 4*node);
447}
448
449/* Where the IO area was mapped on multiquad, always 0 otherwise */
450void *xquad_portio;
451
452static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid)
453{
454 return 1;
455}
456
457/*
458 * We use physical apicids here, not logical, so just return the default
459 * physical broadcast to stop people from breaking us
460 */
461static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask)
462{
463 return 0x0F;
464}
465
466static inline unsigned int
467numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
468 const struct cpumask *andmask)
469{
470 return 0x0F;
471}
472
473/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
474static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb)
475{
476 return cpuid_apic >> index_msb;
477}
478static int __numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
479{
480 numaq_mps_oem_check(mpc, oem, productid);
481 return found_numaq;
482}
483
484static int probe_numaq(void)
485{
486 /* already know from get_memcfg_numaq() */
487 return found_numaq;
488}
489
490static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
491{
492 /* Careful. Some cpus do not strictly honor the set of cpus
493 * specified in the interrupt destination when using lowest
494 * priority interrupt delivery mode.
495 *
496 * In particular there was a hyperthreading cpu observed to
497 * deliver interrupts to the wrong hyperthread when only one
 498	 * hyperthread was specified in the interrupt destination.
499 */
500 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
501}
502
503static void numaq_setup_portio_remap(void)
504{
505 int num_quads = num_online_nodes();
506
507 if (num_quads <= 1)
508 return;
509
510 printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
511 xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
512 printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
513 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
514}
515
516struct genapic apic_numaq = {
517
518 .name = "NUMAQ",
519 .probe = probe_numaq,
520 .acpi_madt_oem_check = NULL,
521 .apic_id_registered = numaq_apic_id_registered,
522
523 .irq_delivery_mode = dest_LowestPrio,
524 /* physical delivery on LOCAL quad: */
525 .irq_dest_mode = 0,
526
527 .target_cpus = numaq_target_cpus,
528 .disable_esr = 1,
529 .dest_logical = APIC_DEST_LOGICAL,
530 .check_apicid_used = numaq_check_apicid_used,
531 .check_apicid_present = numaq_check_apicid_present,
532
533 .vector_allocation_domain = numaq_vector_allocation_domain,
534 .init_apic_ldr = numaq_init_apic_ldr,
535
536 .ioapic_phys_id_map = numaq_ioapic_phys_id_map,
537 .setup_apic_routing = numaq_setup_apic_routing,
538 .multi_timer_check = numaq_multi_timer_check,
539 .apicid_to_node = numaq_apicid_to_node,
540 .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid,
541 .cpu_present_to_apicid = numaq_cpu_present_to_apicid,
542 .apicid_to_cpu_present = numaq_apicid_to_cpu_present,
543 .setup_portio_remap = numaq_setup_portio_remap,
544 .check_phys_apicid_present = numaq_check_phys_apicid_present,
545 .enable_apic_mode = NULL,
546 .phys_pkg_id = numaq_phys_pkg_id,
547 .mps_oem_check = __numaq_mps_oem_check,
548
549 .get_apic_id = numaq_get_apic_id,
550 .set_apic_id = NULL,
551 .apic_id_mask = 0x0F << 24,
552
553 .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid,
554 .cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and,
555
556 .send_IPI_mask = numaq_send_IPI_mask,
557 .send_IPI_mask_allbutself = NULL,
558 .send_IPI_allbutself = numaq_send_IPI_allbutself,
559 .send_IPI_all = numaq_send_IPI_all,
560 .send_IPI_self = default_send_IPI_self,
561
562 .wakeup_cpu = NULL,
563 .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW,
564 .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH,
565
566 /* We don't do anything here because we use NMI's to boot instead */
567 .wait_for_init_deassert = NULL,
568
569 .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic,
570 .store_NMI_vector = numaq_store_NMI_vector,
571 .inquire_remote_apic = NULL,
572};
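
The APIC-ID encoding used by this driver packs the quad number into the high nibble and a one-hot "local CPU" bit into the low nibble, which is what makes numaq_apicid_to_node() a simple shift. A few values worked out directly from numaq_cpu_present_to_apicid() above:

/*
 * apicid = ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3))
 *
 *   mps_cpu 0 -> (0 << 4) | (1 << 0) = 0x01   quad 0, local cpu 0
 *   mps_cpu 3 -> (0 << 4) | (1 << 3) = 0x08   quad 0, local cpu 3
 *   mps_cpu 4 -> (1 << 4) | (1 << 0) = 0x11   quad 1, local cpu 0
 *   mps_cpu 7 -> (1 << 4) | (1 << 3) = 0x18   quad 1, local cpu 3
 *
 * numaq_apicid_to_node() recovers the quad with apicid >> 4, and
 * numaq_apicid_to_cpu_present() recovers the local cpu with
 * __ffs(apicid & 0xf).
 */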
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 95777b0faa73..3a7c5a44082e 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -26,13 +26,3 @@ struct pv_lock_ops pv_lock_ops = {
26}; 26};
27EXPORT_SYMBOL(pv_lock_ops); 27EXPORT_SYMBOL(pv_lock_ops);
28 28
29void __init paravirt_use_bytelocks(void)
30{
31#ifdef CONFIG_SMP
32 pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
33 pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
34 pv_lock_ops.spin_lock = __byte_spin_lock;
35 pv_lock_ops.spin_trylock = __byte_spin_trylock;
36 pv_lock_ops.spin_unlock = __byte_spin_unlock;
37#endif
38}
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index e4c8fb608873..cea11c8e3049 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -44,6 +44,17 @@ void _paravirt_nop(void)
44{ 44{
45} 45}
46 46
47/* identity function, which can be inlined */
48u32 _paravirt_ident_32(u32 x)
49{
50 return x;
51}
52
53u64 _paravirt_ident_64(u64 x)
54{
55 return x;
56}
57
47static void __init default_banner(void) 58static void __init default_banner(void)
48{ 59{
49 printk(KERN_INFO "Booting paravirtualized kernel on %s\n", 60 printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
@@ -138,9 +149,16 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
138 if (opfunc == NULL) 149 if (opfunc == NULL)
139 /* If there's no function, patch it with a ud2a (BUG) */ 150 /* If there's no function, patch it with a ud2a (BUG) */
140 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a)); 151 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
141 else if (opfunc == paravirt_nop) 152 else if (opfunc == _paravirt_nop)
142 /* If the operation is a nop, then nop the callsite */ 153 /* If the operation is a nop, then nop the callsite */
143 ret = paravirt_patch_nop(); 154 ret = paravirt_patch_nop();
155
156 /* identity functions just return their single argument */
157 else if (opfunc == _paravirt_ident_32)
158 ret = paravirt_patch_ident_32(insnbuf, len);
159 else if (opfunc == _paravirt_ident_64)
160 ret = paravirt_patch_ident_64(insnbuf, len);
161
144 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || 162 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
145 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) || 163 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
146 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) || 164 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
@@ -292,10 +310,10 @@ struct pv_time_ops pv_time_ops = {
292 310
293struct pv_irq_ops pv_irq_ops = { 311struct pv_irq_ops pv_irq_ops = {
294 .init_IRQ = native_init_IRQ, 312 .init_IRQ = native_init_IRQ,
295 .save_fl = native_save_fl, 313 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
296 .restore_fl = native_restore_fl, 314 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
297 .irq_disable = native_irq_disable, 315 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
298 .irq_enable = native_irq_enable, 316 .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
299 .safe_halt = native_safe_halt, 317 .safe_halt = native_safe_halt,
300 .halt = native_halt, 318 .halt = native_halt,
301#ifdef CONFIG_X86_64 319#ifdef CONFIG_X86_64
@@ -373,6 +391,14 @@ struct pv_apic_ops pv_apic_ops = {
373#endif 391#endif
374}; 392};
375 393
394#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
395/* 32-bit pagetable entries */
396#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
397#else
398/* 64-bit pagetable entries */
399#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
400#endif
401
376struct pv_mmu_ops pv_mmu_ops = { 402struct pv_mmu_ops pv_mmu_ops = {
377#ifndef CONFIG_X86_64 403#ifndef CONFIG_X86_64
378 .pagetable_setup_start = native_pagetable_setup_start, 404 .pagetable_setup_start = native_pagetable_setup_start,
@@ -424,22 +450,23 @@ struct pv_mmu_ops pv_mmu_ops = {
424 .pmd_clear = native_pmd_clear, 450 .pmd_clear = native_pmd_clear,
425#endif 451#endif
426 .set_pud = native_set_pud, 452 .set_pud = native_set_pud,
427 .pmd_val = native_pmd_val, 453
428 .make_pmd = native_make_pmd, 454 .pmd_val = PTE_IDENT,
455 .make_pmd = PTE_IDENT,
429 456
430#if PAGETABLE_LEVELS == 4 457#if PAGETABLE_LEVELS == 4
431 .pud_val = native_pud_val, 458 .pud_val = PTE_IDENT,
432 .make_pud = native_make_pud, 459 .make_pud = PTE_IDENT,
460
433 .set_pgd = native_set_pgd, 461 .set_pgd = native_set_pgd,
434#endif 462#endif
435#endif /* PAGETABLE_LEVELS >= 3 */ 463#endif /* PAGETABLE_LEVELS >= 3 */
436 464
437 .pte_val = native_pte_val, 465 .pte_val = PTE_IDENT,
438 .pte_flags = native_pte_flags, 466 .pgd_val = PTE_IDENT,
439 .pgd_val = native_pgd_val,
440 467
441 .make_pte = native_make_pte, 468 .make_pte = PTE_IDENT,
442 .make_pgd = native_make_pgd, 469 .make_pgd = PTE_IDENT,
443 470
444 .dup_mmap = paravirt_nop, 471 .dup_mmap = paravirt_nop,
445 .exit_mmap = paravirt_nop, 472 .exit_mmap = paravirt_nop,
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 9fe644f4861d..d9f32e6d6ab6 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -12,6 +12,18 @@ DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
12DEF_NATIVE(pv_cpu_ops, clts, "clts"); 12DEF_NATIVE(pv_cpu_ops, clts, "clts");
13DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc"); 13DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
14 14
15unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
16{
17 /* arg in %eax, return in %eax */
18 return 0;
19}
20
21unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
22{
23 /* arg in %edx:%eax, return in %edx:%eax */
24 return 0;
25}
26
15unsigned native_patch(u8 type, u16 clobbers, void *ibuf, 27unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
16 unsigned long addr, unsigned len) 28 unsigned long addr, unsigned len)
17{ 29{
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 061d01df9ae6..3f08f34f93eb 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -19,6 +19,21 @@ DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
19DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl"); 19DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
20DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs"); 20DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
21 21
22DEF_NATIVE(, mov32, "mov %edi, %eax");
23DEF_NATIVE(, mov64, "mov %rdi, %rax");
24
25unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
26{
27 return paravirt_patch_insns(insnbuf, len,
28 start__mov32, end__mov32);
29}
30
31unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
32{
33 return paravirt_patch_insns(insnbuf, len,
34 start__mov64, end__mov64);
35}
36
22unsigned native_patch(u8 type, u16 clobbers, void *ibuf, 37unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
23 unsigned long addr, unsigned len) 38 unsigned long addr, unsigned len)
24{ 39{
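
Taken together with the _paravirt_ident_32/64 helpers added to paravirt.c above, these templates let the patcher collapse a pv-ops call that is an identity function on native hardware into at most one instruction. A sketch of the idea under the x86-64 calling convention (first argument in %rdi, return value in %rax):

/* the C fallback ... */
u64 _paravirt_ident_64(u64 x)
{
        return x;
}

/* ... and what the call site can be patched to, per the mov64 template:
 *
 *      mov %rdi, %rax
 *
 * On 32-bit the argument already sits in %eax (or %edx:%eax for the
 * 64-bit variant), so paravirt_patch_ident_32() emits zero bytes and the
 * call site is presumably just padded out by the generic patching code.
 */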
diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c
new file mode 100644
index 000000000000..22337b75de62
--- /dev/null
+++ b/arch/x86/kernel/probe_32.c
@@ -0,0 +1,411 @@
1/*
2 * Default generic APIC driver. This handles up to 8 CPUs.
3 *
4 * Copyright 2003 Andi Kleen, SuSE Labs.
5 * Subject to the GNU Public License, v.2
6 *
7 * Generic x86 APIC driver probe layer.
8 */
9#include <linux/threads.h>
10#include <linux/cpumask.h>
11#include <linux/string.h>
12#include <linux/kernel.h>
13#include <linux/ctype.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16#include <asm/fixmap.h>
17#include <asm/mpspec.h>
18#include <asm/apicdef.h>
19#include <asm/genapic.h>
20#include <asm/setup.h>
21
22#include <linux/threads.h>
23#include <linux/cpumask.h>
24#include <asm/mpspec.h>
25#include <asm/genapic.h>
26#include <asm/fixmap.h>
27#include <asm/apicdef.h>
28#include <linux/kernel.h>
29#include <linux/string.h>
30#include <linux/smp.h>
31#include <linux/init.h>
32#include <asm/genapic.h>
33#include <asm/ipi.h>
34
35#include <linux/smp.h>
36#include <linux/init.h>
37#include <linux/interrupt.h>
38#include <asm/acpi.h>
39#include <asm/arch_hooks.h>
40#include <asm/e820.h>
41#include <asm/setup.h>
42
43#include <asm/genapic.h>
44
45#ifdef CONFIG_HOTPLUG_CPU
46#define DEFAULT_SEND_IPI (1)
47#else
48#define DEFAULT_SEND_IPI (0)
49#endif
50
51int no_broadcast = DEFAULT_SEND_IPI;
52
53#ifdef CONFIG_X86_LOCAL_APIC
54
55static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
56{
57 /*
58 * Careful. Some cpus do not strictly honor the set of cpus
59 * specified in the interrupt destination when using lowest
60 * priority interrupt delivery mode.
61 *
62 * In particular there was a hyperthreading cpu observed to
63 * deliver interrupts to the wrong hyperthread when only one
 64	 * hyperthread was specified in the interrupt destination.
65 */
66 *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
67}
68
69/* should be called last. */
70static int probe_default(void)
71{
72 return 1;
73}
74
75struct genapic apic_default = {
76
77 .name = "default",
78 .probe = probe_default,
79 .acpi_madt_oem_check = NULL,
80 .apic_id_registered = default_apic_id_registered,
81
82 .irq_delivery_mode = dest_LowestPrio,
83 /* logical delivery broadcast to all CPUs: */
84 .irq_dest_mode = 1,
85
86 .target_cpus = default_target_cpus,
87 .disable_esr = 0,
88 .dest_logical = APIC_DEST_LOGICAL,
89 .check_apicid_used = default_check_apicid_used,
90 .check_apicid_present = default_check_apicid_present,
91
92 .vector_allocation_domain = default_vector_allocation_domain,
93 .init_apic_ldr = default_init_apic_ldr,
94
95 .ioapic_phys_id_map = default_ioapic_phys_id_map,
96 .setup_apic_routing = default_setup_apic_routing,
97 .multi_timer_check = NULL,
98 .apicid_to_node = default_apicid_to_node,
99 .cpu_to_logical_apicid = default_cpu_to_logical_apicid,
100 .cpu_present_to_apicid = default_cpu_present_to_apicid,
101 .apicid_to_cpu_present = default_apicid_to_cpu_present,
102 .setup_portio_remap = NULL,
103 .check_phys_apicid_present = default_check_phys_apicid_present,
104 .enable_apic_mode = NULL,
105 .phys_pkg_id = default_phys_pkg_id,
106 .mps_oem_check = NULL,
107
108 .get_apic_id = default_get_apic_id,
109 .set_apic_id = NULL,
110 .apic_id_mask = 0x0F << 24,
111
112 .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
113 .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
114
115 .send_IPI_mask = default_send_IPI_mask_logical,
116 .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical,
117 .send_IPI_allbutself = default_send_IPI_allbutself,
118 .send_IPI_all = default_send_IPI_all,
119 .send_IPI_self = default_send_IPI_self,
120
121 .wakeup_cpu = NULL,
122 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
123 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
124
125 .wait_for_init_deassert = default_wait_for_init_deassert,
126
127 .smp_callin_clear_local_apic = NULL,
128 .store_NMI_vector = NULL,
129 .inquire_remote_apic = default_inquire_remote_apic,
130};
131
132extern struct genapic apic_numaq;
133extern struct genapic apic_summit;
134extern struct genapic apic_bigsmp;
135extern struct genapic apic_es7000;
136extern struct genapic apic_default;
137
138struct genapic *apic = &apic_default;
139
140static struct genapic *apic_probe[] __initdata = {
141#ifdef CONFIG_X86_NUMAQ
142 &apic_numaq,
143#endif
144#ifdef CONFIG_X86_SUMMIT
145 &apic_summit,
146#endif
147#ifdef CONFIG_X86_BIGSMP
148 &apic_bigsmp,
149#endif
150#ifdef CONFIG_X86_ES7000
151 &apic_es7000,
152#endif
153 &apic_default, /* must be last */
154 NULL,
155};
156
157static int cmdline_apic __initdata;
158static int __init parse_apic(char *arg)
159{
160 int i;
161
162 if (!arg)
163 return -EINVAL;
164
165 for (i = 0; apic_probe[i]; i++) {
166 if (!strcmp(apic_probe[i]->name, arg)) {
167 apic = apic_probe[i];
168 cmdline_apic = 1;
169 return 0;
170 }
171 }
172
173 if (x86_quirks->update_genapic)
174 x86_quirks->update_genapic();
175
176 /* Parsed again by __setup for debug/verbose */
177 return 0;
178}
179early_param("apic", parse_apic);
180
181void __init generic_bigsmp_probe(void)
182{
183#ifdef CONFIG_X86_BIGSMP
184 /*
185 * This routine is used to switch to bigsmp mode when
186 * - There is no apic= option specified by the user
187 * - generic_apic_probe() has chosen apic_default as the sub_arch
188 * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
189 */
190
191 if (!cmdline_apic && apic == &apic_default) {
192 if (apic_bigsmp.probe()) {
193 apic = &apic_bigsmp;
194 if (x86_quirks->update_genapic)
195 x86_quirks->update_genapic();
196 printk(KERN_INFO "Overriding APIC driver with %s\n",
197 apic->name);
198 }
199 }
200#endif
201}
202
203void __init generic_apic_probe(void)
204{
205 if (!cmdline_apic) {
206 int i;
207 for (i = 0; apic_probe[i]; i++) {
208 if (apic_probe[i]->probe()) {
209 apic = apic_probe[i];
210 break;
211 }
212 }
213 /* Not visible without early console */
214 if (!apic_probe[i])
215 panic("Didn't find an APIC driver");
216
217 if (x86_quirks->update_genapic)
218 x86_quirks->update_genapic();
219 }
220 printk(KERN_INFO "Using APIC driver %s\n", apic->name);
221}
222
223/* These functions can switch the APIC even after the initial ->probe() */
224
225int __init
226generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
227{
228 int i;
229
230 for (i = 0; apic_probe[i]; ++i) {
231 if (!apic_probe[i]->mps_oem_check)
232 continue;
233 if (!apic_probe[i]->mps_oem_check(mpc, oem, productid))
234 continue;
235
236 if (!cmdline_apic) {
237 apic = apic_probe[i];
238 if (x86_quirks->update_genapic)
239 x86_quirks->update_genapic();
240 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
241 apic->name);
242 }
243 return 1;
244 }
245 return 0;
246}
247
248int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
249{
250 int i;
251
252 for (i = 0; apic_probe[i]; ++i) {
253 if (!apic_probe[i]->acpi_madt_oem_check)
254 continue;
255 if (!apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id))
256 continue;
257
258 if (!cmdline_apic) {
259 apic = apic_probe[i];
260 if (x86_quirks->update_genapic)
261 x86_quirks->update_genapic();
262 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
263 apic->name);
264 }
265 return 1;
266 }
267 return 0;
268}
269
270#endif /* CONFIG_X86_LOCAL_APIC */
271
272/**
273 * pre_intr_init_hook - initialisation prior to setting up interrupt vectors
274 *
275 * Description:
276 * Perform any necessary interrupt initialisation prior to setting up
277 * the "ordinary" interrupt call gates. For legacy reasons, the ISA
278 * interrupts should be initialised here if the machine emulates a PC
279 * in any way.
280 **/
281void __init pre_intr_init_hook(void)
282{
283 if (x86_quirks->arch_pre_intr_init) {
284 if (x86_quirks->arch_pre_intr_init())
285 return;
286 }
287 init_ISA_irqs();
288}
289
290/**
291 * intr_init_hook - post gate setup interrupt initialisation
292 *
293 * Description:
294 * Fill in any interrupts that may have been left out by the general
295 * init_IRQ() routine. interrupts having to do with the machine rather
296 * than the devices on the I/O bus (like APIC interrupts in intel MP
297 * systems) are started here.
298 **/
299void __init intr_init_hook(void)
300{
301 if (x86_quirks->arch_intr_init) {
302 if (x86_quirks->arch_intr_init())
303 return;
304 }
305}
306
307/**
308 * pre_setup_arch_hook - hook called prior to any setup_arch() execution
309 *
310 * Description:
311 * generally used to activate any machine specific identification
312 * routines that may be needed before setup_arch() runs. On Voyager
313 * this is used to get the board revision and type.
314 **/
315void __init pre_setup_arch_hook(void)
316{
317}
318
319/**
320 * trap_init_hook - initialise system specific traps
321 *
322 * Description:
323 * Called as the final act of trap_init(). Used in VISWS to initialise
324 * the various board specific APIC traps.
325 **/
326void __init trap_init_hook(void)
327{
328 if (x86_quirks->arch_trap_init) {
329 if (x86_quirks->arch_trap_init())
330 return;
331 }
332}
333
334static struct irqaction irq0 = {
335 .handler = timer_interrupt,
336 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL,
337 .mask = CPU_MASK_NONE,
338 .name = "timer"
339};
340
341/**
 342 * pre_time_init_hook - do any specific initialisations before the system timer is set up.
343 *
344 **/
345void __init pre_time_init_hook(void)
346{
347 if (x86_quirks->arch_pre_time_init)
348 x86_quirks->arch_pre_time_init();
349}
350
351/**
352 * time_init_hook - do any specific initialisations for the system timer.
353 *
354 * Description:
355 * Must plug the system timer interrupt source at HZ into the IRQ listed
356 * in irq_vectors.h:TIMER_IRQ
357 **/
358void __init time_init_hook(void)
359{
360 if (x86_quirks->arch_time_init) {
361 /*
362 * A nonzero return code does not mean failure, it means
363 * that the architecture quirk does not want any
364 * generic (timer) setup to be performed after this:
365 */
366 if (x86_quirks->arch_time_init())
367 return;
368 }
369
370 irq0.mask = cpumask_of_cpu(0);
371 setup_irq(0, &irq0);
372}
373
374#ifdef CONFIG_MCA
375/**
376 * mca_nmi_hook - hook into MCA specific NMI chain
377 *
378 * Description:
379 * The MCA (Microchannel Architecture) has an NMI chain for NMI sources
380 * along the MCA bus. Use this to hook into that chain if you will need
381 * it.
382 **/
383void mca_nmi_hook(void)
384{
385 /*
386 * If I recall correctly, there's a whole bunch of other things that
387 * we can do to check for NMI problems, but that's all I know about
388 * at the moment.
389 */
390 pr_warning("NMI generated from unknown source!\n");
391}
392#endif
393
394static __init int no_ipi_broadcast(char *str)
395{
396 get_option(&str, &no_broadcast);
397 pr_info("Using %s mode\n",
398 no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
399 return 1;
400}
401__setup("no_ipi_broadcast=", no_ipi_broadcast);
402
403static int __init print_ipi_mode(void)
404{
405 pr_info("Using IPI %s mode\n",
406 no_broadcast ? "No-Shortcut" : "Shortcut");
407 return 0;
408}
409
410late_initcall(print_ipi_mode);
411
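
The effect of the table-plus-probe arrangement above is that a driver can be forced from the boot command line (e.g. "apic=NUMAQ" matches apic_probe[] by name and sets cmdline_apic, so the later OEM/MADT checks will not override it), while an unspecified boot falls through to apic_default, whose probe() always succeeds. A hypothetical, self-contained restatement of that selection loop (names invented for illustration):

#include <stddef.h>

struct demo_apic {
        const char *name;
        int (*probe)(void);
};

static const struct demo_apic *pick_driver(const struct demo_apic *const *table)
{
        size_t i;

        for (i = 0; table[i]; i++)
                if (table[i]->probe())  /* first driver to claim the box wins */
                        return table[i];

        return NULL;    /* unreachable when the last real entry is a catch-all */
}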
diff --git a/arch/x86/kernel/probe_roms_32.c b/arch/x86/kernel/probe_roms_32.c
index 675a48c404a5..071e7fea42e5 100644
--- a/arch/x86/kernel/probe_roms_32.c
+++ b/arch/x86/kernel/probe_roms_32.c
@@ -18,7 +18,7 @@
18#include <asm/setup.h> 18#include <asm/setup.h>
19#include <asm/sections.h> 19#include <asm/sections.h>
20#include <asm/io.h> 20#include <asm/io.h>
21#include <setup_arch.h> 21#include <asm/setup_arch.h>
22 22
23static struct resource system_rom_resource = { 23static struct resource system_rom_resource = {
24 .name = "System ROM", 24 .name = "System ROM",
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6d12f7e37f8c..87b69d4fac16 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -350,7 +350,7 @@ static void c1e_idle(void)
350 350
351void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) 351void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
352{ 352{
353#ifdef CONFIG_X86_SMP 353#ifdef CONFIG_SMP
354 if (pm_idle == poll_idle && smp_num_siblings > 1) { 354 if (pm_idle == poll_idle && smp_num_siblings > 1) {
355 printk(KERN_WARNING "WARNING: polling idle and HT enabled," 355 printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
356 " performance may degrade.\n"); 356 " performance may degrade.\n");
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index a546f55c77b4..1a1ae8edc40c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -66,9 +66,6 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
66DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; 66DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
67EXPORT_PER_CPU_SYMBOL(current_task); 67EXPORT_PER_CPU_SYMBOL(current_task);
68 68
69DEFINE_PER_CPU(int, cpu_number);
70EXPORT_PER_CPU_SYMBOL(cpu_number);
71
72/* 69/*
73 * Return saved PC of a blocked thread. 70 * Return saved PC of a blocked thread.
74 */ 71 */
@@ -111,7 +108,6 @@ void cpu_idle(void)
111 play_dead(); 108 play_dead();
112 109
113 local_irq_disable(); 110 local_irq_disable();
114 __get_cpu_var(irq_stat).idle_timestamp = jiffies;
115 /* Don't trace irqs off for idle */ 111 /* Don't trace irqs off for idle */
116 stop_critical_timings(); 112 stop_critical_timings();
117 pm_idle(); 113 pm_idle();
@@ -591,7 +587,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
591 if (prev->gs | next->gs) 587 if (prev->gs | next->gs)
592 loadsegment(gs, next->gs); 588 loadsegment(gs, next->gs);
593 589
594 x86_write_percpu(current_task, next_p); 590 percpu_write(current_task, next_p);
595 591
596 return prev_p; 592 return prev_p;
597} 593}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 85b4cb5c1980..8eb169e45584 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -16,6 +16,7 @@
16 16
17#include <stdarg.h> 17#include <stdarg.h>
18 18
19#include <linux/stackprotector.h>
19#include <linux/cpu.h> 20#include <linux/cpu.h>
20#include <linux/errno.h> 21#include <linux/errno.h>
21#include <linux/sched.h> 22#include <linux/sched.h>
@@ -47,7 +48,6 @@
47#include <asm/processor.h> 48#include <asm/processor.h>
48#include <asm/i387.h> 49#include <asm/i387.h>
49#include <asm/mmu_context.h> 50#include <asm/mmu_context.h>
50#include <asm/pda.h>
51#include <asm/prctl.h> 51#include <asm/prctl.h>
52#include <asm/desc.h> 52#include <asm/desc.h>
53#include <asm/proto.h> 53#include <asm/proto.h>
@@ -58,6 +58,12 @@
58 58
59asmlinkage extern void ret_from_fork(void); 59asmlinkage extern void ret_from_fork(void);
60 60
61DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
62EXPORT_PER_CPU_SYMBOL(current_task);
63
64DEFINE_PER_CPU(unsigned long, old_rsp);
65static DEFINE_PER_CPU(unsigned char, is_idle);
66
61unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED; 67unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
62 68
63static ATOMIC_NOTIFIER_HEAD(idle_notifier); 69static ATOMIC_NOTIFIER_HEAD(idle_notifier);
@@ -76,13 +82,13 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
76 82
77void enter_idle(void) 83void enter_idle(void)
78{ 84{
79 write_pda(isidle, 1); 85 percpu_write(is_idle, 1);
80 atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL); 86 atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
81} 87}
82 88
83static void __exit_idle(void) 89static void __exit_idle(void)
84{ 90{
85 if (test_and_clear_bit_pda(0, isidle) == 0) 91 if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
86 return; 92 return;
87 atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL); 93 atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
88} 94}
@@ -112,6 +118,17 @@ static inline void play_dead(void)
112void cpu_idle(void) 118void cpu_idle(void)
113{ 119{
114 current_thread_info()->status |= TS_POLLING; 120 current_thread_info()->status |= TS_POLLING;
121
122 /*
123 * If we're the non-boot CPU, nothing set the PDA stack
124 * canary up for us - and if we are the boot CPU we have
125 * a 0 stack canary. This is a good place for updating
 126	 * it, as we won't ever return from this function (so the
 127	 * invalid canaries already on the stack won't ever
128 * trigger):
129 */
130 boot_init_stack_canary();
131
115 /* endless idle loop with no priority at all */ 132 /* endless idle loop with no priority at all */
116 while (1) { 133 while (1) {
117 tick_nohz_stop_sched_tick(1); 134 tick_nohz_stop_sched_tick(1);
@@ -397,7 +414,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
397 load_gs_index(0); 414 load_gs_index(0);
398 regs->ip = new_ip; 415 regs->ip = new_ip;
399 regs->sp = new_sp; 416 regs->sp = new_sp;
400 write_pda(oldrsp, new_sp); 417 percpu_write(old_rsp, new_sp);
401 regs->cs = __USER_CS; 418 regs->cs = __USER_CS;
402 regs->ss = __USER_DS; 419 regs->ss = __USER_DS;
403 regs->flags = 0x200; 420 regs->flags = 0x200;
@@ -618,21 +635,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
618 /* 635 /*
619 * Switch the PDA and FPU contexts. 636 * Switch the PDA and FPU contexts.
620 */ 637 */
621 prev->usersp = read_pda(oldrsp); 638 prev->usersp = percpu_read(old_rsp);
622 write_pda(oldrsp, next->usersp); 639 percpu_write(old_rsp, next->usersp);
623 write_pda(pcurrent, next_p); 640 percpu_write(current_task, next_p);
624 641
625 write_pda(kernelstack, 642 percpu_write(kernel_stack,
626 (unsigned long)task_stack_page(next_p) + 643 (unsigned long)task_stack_page(next_p) +
627 THREAD_SIZE - PDA_STACKOFFSET); 644 THREAD_SIZE - KERNEL_STACK_OFFSET);
628#ifdef CONFIG_CC_STACKPROTECTOR
629 write_pda(stack_canary, next_p->stack_canary);
630 /*
631 * Build time only check to make sure the stack_canary is at
632 * offset 40 in the pda; this is a gcc ABI requirement
633 */
634 BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
635#endif
636 645
637 /* 646 /*
638 * Now maybe reload the debug registers and handle I/O bitmaps 647 * Now maybe reload the debug registers and handle I/O bitmaps
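
The hunk above is one instance of the broader PDA-to-per-CPU conversion in this series: a former PDA field becomes an ordinary per-CPU variable, and each read_pda()/write_pda() pair turns into percpu_read()/percpu_write(). A minimal sketch of the pattern using the old_rsp variable introduced above (kernel context assumed, not standalone):

DEFINE_PER_CPU(unsigned long, old_rsp);

/* save the outgoing task's user stack pointer, load the incoming one */
static void switch_user_rsp(unsigned long *prev_usersp, unsigned long next_usersp)
{
        *prev_usersp = percpu_read(old_rsp);
        percpu_write(old_rsp, next_usersp);
}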
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 2b46eb41643b..32e8f0af292c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -14,6 +14,7 @@
14#include <asm/reboot.h> 14#include <asm/reboot.h>
15#include <asm/pci_x86.h> 15#include <asm/pci_x86.h>
16#include <asm/virtext.h> 16#include <asm/virtext.h>
17#include <asm/cpu.h>
17 18
18#ifdef CONFIG_X86_32 19#ifdef CONFIG_X86_32
19# include <linux/dmi.h> 20# include <linux/dmi.h>
@@ -23,7 +24,7 @@
23# include <asm/iommu.h> 24# include <asm/iommu.h>
24#endif 25#endif
25 26
26#include <mach_ipi.h> 27#include <asm/genapic.h>
27 28
28/* 29/*
29 * Power off function, if any 30 * Power off function, if any
@@ -650,7 +651,7 @@ static int crash_nmi_callback(struct notifier_block *self,
650 651
651static void smp_send_nmi_allbutself(void) 652static void smp_send_nmi_allbutself(void)
652{ 653{
653 send_IPI_allbutself(NMI_VECTOR); 654 apic->send_IPI_allbutself(NMI_VECTOR);
654} 655}
655 656
656static struct notifier_block crash_nmi_nb = { 657static struct notifier_block crash_nmi_nb = {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c461f6d69074..8fce6c714514 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -81,7 +81,7 @@
81#include <asm/io_apic.h> 81#include <asm/io_apic.h>
82#include <asm/ist.h> 82#include <asm/ist.h>
83#include <asm/vmi.h> 83#include <asm/vmi.h>
84#include <setup_arch.h> 84#include <asm/setup_arch.h>
85#include <asm/bios_ebda.h> 85#include <asm/bios_ebda.h>
86#include <asm/cacheflush.h> 86#include <asm/cacheflush.h>
87#include <asm/processor.h> 87#include <asm/processor.h>
@@ -89,7 +89,7 @@
89 89
90#include <asm/system.h> 90#include <asm/system.h>
91#include <asm/vsyscall.h> 91#include <asm/vsyscall.h>
92#include <asm/smp.h> 92#include <asm/cpu.h>
93#include <asm/desc.h> 93#include <asm/desc.h>
94#include <asm/dma.h> 94#include <asm/dma.h>
95#include <asm/iommu.h> 95#include <asm/iommu.h>
@@ -97,7 +97,7 @@
97#include <asm/mmu_context.h> 97#include <asm/mmu_context.h>
98#include <asm/proto.h> 98#include <asm/proto.h>
99 99
100#include <mach_apic.h> 100#include <asm/genapic.h>
101#include <asm/paravirt.h> 101#include <asm/paravirt.h>
102#include <asm/hypervisor.h> 102#include <asm/hypervisor.h>
103 103
@@ -112,6 +112,20 @@
112#define ARCH_SETUP 112#define ARCH_SETUP
113#endif 113#endif
114 114
115unsigned int boot_cpu_id __read_mostly;
116
117#ifdef CONFIG_X86_64
118int default_cpu_present_to_apicid(int mps_cpu)
119{
120 return __default_cpu_present_to_apicid(mps_cpu);
121}
122
123int default_check_phys_apicid_present(int boot_cpu_physical_apicid)
124{
125 return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
126}
127#endif
128
115#ifndef CONFIG_DEBUG_BOOT_PARAMS 129#ifndef CONFIG_DEBUG_BOOT_PARAMS
116struct boot_params __initdata boot_params; 130struct boot_params __initdata boot_params;
117#else 131#else
@@ -588,10 +602,9 @@ early_param("elfcorehdr", setup_elfcorehdr);
588 602
589static int __init default_update_genapic(void) 603static int __init default_update_genapic(void)
590{ 604{
591#ifdef CONFIG_X86_SMP 605#ifdef CONFIG_SMP
592# if defined(CONFIG_X86_GENERICARCH) || defined(CONFIG_X86_64) 606 if (!apic->wakeup_cpu)
593 genapic->wakeup_cpu = wakeup_secondary_cpu_via_init; 607 apic->wakeup_cpu = wakeup_secondary_cpu_via_init;
594# endif
595#endif 608#endif
596 609
597 return 0; 610 return 0;
@@ -892,12 +905,11 @@ void __init setup_arch(char **cmdline_p)
892 */ 905 */
893 acpi_reserve_bootmem(); 906 acpi_reserve_bootmem();
894#endif 907#endif
895#ifdef CONFIG_X86_FIND_SMP_CONFIG
896 /* 908 /*
897 * Find and reserve possible boot-time SMP configuration: 909 * Find and reserve possible boot-time SMP configuration:
898 */ 910 */
899 find_smp_config(); 911 find_smp_config();
900#endif 912
901 reserve_crashkernel(); 913 reserve_crashkernel();
902 914
903#ifdef CONFIG_X86_64 915#ifdef CONFIG_X86_64
@@ -924,9 +936,7 @@ void __init setup_arch(char **cmdline_p)
924 map_vsyscall(); 936 map_vsyscall();
925#endif 937#endif
926 938
927#ifdef CONFIG_X86_GENERICARCH
928 generic_apic_probe(); 939 generic_apic_probe();
929#endif
930 940
931 early_quirks(); 941 early_quirks();
932 942
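The setup.c change makes default_update_genapic() install wakeup_secondary_cpu_via_init only when the probed driver left apic->wakeup_cpu unset, instead of overwriting it unconditionally under an #ifdef maze. A small sketch of that "fill in a default late" pattern, with purely illustrative names:

/*
 * Sketch of installing a default only if the driver left the hook
 * NULL, as default_update_genapic() does above.  Userspace code,
 * illustrative only.
 */
#include <stdio.h>
#include <stddef.h>

struct cpu_driver {
	int (*wakeup_cpu)(int apicid, unsigned long start_ip);
};

static int wakeup_via_init(int apicid, unsigned long start_ip)
{
	printf("INIT/SIPI wakeup of apicid %d at %#lx\n", apicid, start_ip);
	return 0;
}

static struct cpu_driver drv = { .wakeup_cpu = NULL };

static void late_update_defaults(void)
{
	if (!drv.wakeup_cpu)		/* driver did not override it */
		drv.wakeup_cpu = wakeup_via_init;
}

int main(void)
{
	late_update_defaults();
	return drv.wakeup_cpu(1, 0x9f000);
}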
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 01161077a49c..ef91747bbed5 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -13,145 +13,46 @@
13#include <asm/mpspec.h> 13#include <asm/mpspec.h>
14#include <asm/apicdef.h> 14#include <asm/apicdef.h>
15#include <asm/highmem.h> 15#include <asm/highmem.h>
16#include <asm/proto.h>
17#include <asm/cpumask.h>
18#include <asm/cpu.h>
16 19
17#ifdef CONFIG_X86_LOCAL_APIC 20#ifdef CONFIG_DEBUG_PER_CPU_MAPS
18unsigned int num_processors; 21# define DBG(x...) printk(KERN_DEBUG x)
19unsigned disabled_cpus __cpuinitdata;
20/* Processor that is doing the boot up */
21unsigned int boot_cpu_physical_apicid = -1U;
22EXPORT_SYMBOL(boot_cpu_physical_apicid);
23unsigned int max_physical_apicid;
24
25/* Bitmask of physically existing CPUs */
26physid_mask_t phys_cpu_present_map;
27#endif
28
29/* map cpu index to physical APIC ID */
30DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
31DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
32EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
33EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
34
35#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
36#define X86_64_NUMA 1
37
38/* map cpu index to node index */
39DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
40EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
41
42/* which logical CPUs are on which nodes */
43cpumask_t *node_to_cpumask_map;
44EXPORT_SYMBOL(node_to_cpumask_map);
45
46/* setup node_to_cpumask_map */
47static void __init setup_node_to_cpumask_map(void);
48
49#else 22#else
50static inline void setup_node_to_cpumask_map(void) { } 23# define DBG(x...)
51#endif 24#endif
52 25
53#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP) 26DEFINE_PER_CPU(int, cpu_number);
54/* 27EXPORT_PER_CPU_SYMBOL(cpu_number);
55 * Copy data used in early init routines from the initial arrays to the
56 * per cpu data areas. These arrays then become expendable and the
57 * *_early_ptr's are zeroed indicating that the static arrays are gone.
58 */
59static void __init setup_per_cpu_maps(void)
60{
61 int cpu;
62 28
63 for_each_possible_cpu(cpu) { 29#ifdef CONFIG_X86_64
64 per_cpu(x86_cpu_to_apicid, cpu) = 30#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
65 early_per_cpu_map(x86_cpu_to_apicid, cpu); 31#else
66 per_cpu(x86_bios_cpu_apicid, cpu) = 32#define BOOT_PERCPU_OFFSET 0
67 early_per_cpu_map(x86_bios_cpu_apicid, cpu);
68#ifdef X86_64_NUMA
69 per_cpu(x86_cpu_to_node_map, cpu) =
70 early_per_cpu_map(x86_cpu_to_node_map, cpu);
71#endif 33#endif
72 }
73 34
74 /* indicate the early static arrays will soon be gone */ 35DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
75 early_per_cpu_ptr(x86_cpu_to_apicid) = NULL; 36EXPORT_PER_CPU_SYMBOL(this_cpu_off);
76 early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
77#ifdef X86_64_NUMA
78 early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
79#endif
80}
81 37
82#ifdef CONFIG_X86_32 38unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
83/* 39 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
84 * Great future not-so-futuristic plan: make i386 and x86_64 do it 40};
85 * the same way
86 */
87unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
88EXPORT_SYMBOL(__per_cpu_offset); 41EXPORT_SYMBOL(__per_cpu_offset);
89static inline void setup_cpu_pda_map(void) { }
90
91#elif !defined(CONFIG_SMP)
92static inline void setup_cpu_pda_map(void) { }
93
94#else /* CONFIG_SMP && CONFIG_X86_64 */
95
96/*
97 * Allocate cpu_pda pointer table and array via alloc_bootmem.
98 */
99static void __init setup_cpu_pda_map(void)
100{
101 char *pda;
102 struct x8664_pda **new_cpu_pda;
103 unsigned long size;
104 int cpu;
105
106 size = roundup(sizeof(struct x8664_pda), cache_line_size());
107
108 /* allocate cpu_pda array and pointer table */
109 {
110 unsigned long tsize = nr_cpu_ids * sizeof(void *);
111 unsigned long asize = size * (nr_cpu_ids - 1);
112 42
113 tsize = roundup(tsize, cache_line_size()); 43static inline void setup_percpu_segment(int cpu)
114 new_cpu_pda = alloc_bootmem(tsize + asize);
115 pda = (char *)new_cpu_pda + tsize;
116 }
117
118 /* initialize pointer table to static pda's */
119 for_each_possible_cpu(cpu) {
120 if (cpu == 0) {
121 /* leave boot cpu pda in place */
122 new_cpu_pda[0] = cpu_pda(0);
123 continue;
124 }
125 new_cpu_pda[cpu] = (struct x8664_pda *)pda;
126 new_cpu_pda[cpu]->in_bootmem = 1;
127 pda += size;
128 }
129
130 /* point to new pointer table */
131 _cpu_pda = new_cpu_pda;
132}
133
134#endif /* CONFIG_SMP && CONFIG_X86_64 */
135
136#ifdef CONFIG_X86_64
137
138/* correctly size the local cpu masks */
139static void __init setup_cpu_local_masks(void)
140{ 44{
141 alloc_bootmem_cpumask_var(&cpu_initialized_mask); 45#ifdef CONFIG_X86_32
142 alloc_bootmem_cpumask_var(&cpu_callin_mask); 46 struct desc_struct gdt;
143 alloc_bootmem_cpumask_var(&cpu_callout_mask);
144 alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
145}
146
147#else /* CONFIG_X86_32 */
148 47
149static inline void setup_cpu_local_masks(void) 48 pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
150{ 49 0x2 | DESCTYPE_S, 0x8);
50 gdt.s = 1;
51 write_gdt_entry(get_cpu_gdt_table(cpu),
52 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
53#endif
151} 54}
152 55
153#endif /* CONFIG_X86_32 */
154
155/* 56/*
156 * Great future plan: 57 * Great future plan:
157 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data. 58 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
@@ -159,18 +60,12 @@ static inline void setup_cpu_local_masks(void)
159 */ 60 */
160void __init setup_per_cpu_areas(void) 61void __init setup_per_cpu_areas(void)
161{ 62{
162 ssize_t size, old_size; 63 ssize_t size;
163 char *ptr; 64 char *ptr;
164 int cpu; 65 int cpu;
165 unsigned long align = 1;
166
167 /* Setup cpu_pda map */
168 setup_cpu_pda_map();
169 66
170 /* Copy section for each CPU (we discard the original) */ 67 /* Copy section for each CPU (we discard the original) */
171 old_size = PERCPU_ENOUGH_ROOM; 68 size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
172 align = max_t(unsigned long, PAGE_SIZE, align);
173 size = roundup(old_size, align);
174 69
175 pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", 70 pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
176 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); 71 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
@@ -179,30 +74,67 @@ void __init setup_per_cpu_areas(void)
179 74
180 for_each_possible_cpu(cpu) { 75 for_each_possible_cpu(cpu) {
181#ifndef CONFIG_NEED_MULTIPLE_NODES 76#ifndef CONFIG_NEED_MULTIPLE_NODES
182 ptr = __alloc_bootmem(size, align, 77 ptr = alloc_bootmem_pages(size);
183 __pa(MAX_DMA_ADDRESS));
184#else 78#else
185 int node = early_cpu_to_node(cpu); 79 int node = early_cpu_to_node(cpu);
186 if (!node_online(node) || !NODE_DATA(node)) { 80 if (!node_online(node) || !NODE_DATA(node)) {
187 ptr = __alloc_bootmem(size, align, 81 ptr = alloc_bootmem_pages(size);
188 __pa(MAX_DMA_ADDRESS));
189 pr_info("cpu %d has no node %d or node-local memory\n", 82 pr_info("cpu %d has no node %d or node-local memory\n",
190 cpu, node); 83 cpu, node);
191 pr_debug("per cpu data for cpu%d at %016lx\n", 84 pr_debug("per cpu data for cpu%d at %016lx\n",
192 cpu, __pa(ptr)); 85 cpu, __pa(ptr));
193 } else { 86 } else {
194 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, 87 ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
195 __pa(MAX_DMA_ADDRESS));
196 pr_debug("per cpu data for cpu%d on node%d at %016lx\n", 88 pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
197 cpu, node, __pa(ptr)); 89 cpu, node, __pa(ptr));
198 } 90 }
199#endif 91#endif
92
93 memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
200 per_cpu_offset(cpu) = ptr - __per_cpu_start; 94 per_cpu_offset(cpu) = ptr - __per_cpu_start;
201 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 95 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
96 per_cpu(cpu_number, cpu) = cpu;
97 setup_percpu_segment(cpu);
98 /*
99 * Copy data used in early init routines from the
100 * initial arrays to the per cpu data areas. These
101 * arrays then become expendable and the *_early_ptr's
102 * are zeroed indicating that the static arrays are
103 * gone.
104 */
105#ifdef CONFIG_X86_LOCAL_APIC
106 per_cpu(x86_cpu_to_apicid, cpu) =
107 early_per_cpu_map(x86_cpu_to_apicid, cpu);
108 per_cpu(x86_bios_cpu_apicid, cpu) =
109 early_per_cpu_map(x86_bios_cpu_apicid, cpu);
110#endif
111#ifdef CONFIG_X86_64
112 per_cpu(irq_stack_ptr, cpu) =
113 per_cpu(irq_stack_union.irq_stack, cpu) +
114 IRQ_STACK_SIZE - 64;
115#ifdef CONFIG_NUMA
116 per_cpu(x86_cpu_to_node_map, cpu) =
117 early_per_cpu_map(x86_cpu_to_node_map, cpu);
118#endif
119#endif
120 /*
121 * Up to this point, the boot CPU has been using .data.init
122 * area. Reload any changed state for the boot CPU.
123 */
124 if (cpu == boot_cpu_id)
125 switch_to_new_gdt(cpu);
126
127 DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
202 } 128 }
203 129
204 /* Setup percpu data maps */ 130 /* indicate the early static arrays will soon be gone */
205 setup_per_cpu_maps(); 131#ifdef CONFIG_X86_LOCAL_APIC
132 early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
133 early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
134#endif
135#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
136 early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
137#endif
206 138
207 /* Setup node to cpumask map */ 139 /* Setup node to cpumask map */
208 setup_node_to_cpumask_map(); 140 setup_node_to_cpumask_map();
@@ -210,199 +142,3 @@ void __init setup_per_cpu_areas(void)
210 /* Setup cpu initialized, callin, callout masks */ 142 /* Setup cpu initialized, callin, callout masks */
211 setup_cpu_local_masks(); 143 setup_cpu_local_masks();
212} 144}
213
214#endif
215
216#ifdef X86_64_NUMA
217
218/*
219 * Allocate node_to_cpumask_map based on number of available nodes
220 * Requires node_possible_map to be valid.
221 *
222 * Note: node_to_cpumask() is not valid until after this is done.
223 */
224static void __init setup_node_to_cpumask_map(void)
225{
226 unsigned int node, num = 0;
227 cpumask_t *map;
228
229 /* setup nr_node_ids if not done yet */
230 if (nr_node_ids == MAX_NUMNODES) {
231 for_each_node_mask(node, node_possible_map)
232 num = node;
233 nr_node_ids = num + 1;
234 }
235
236 /* allocate the map */
237 map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
238
239 pr_debug("Node to cpumask map at %p for %d nodes\n",
240 map, nr_node_ids);
241
242 /* node_to_cpumask() will now work */
243 node_to_cpumask_map = map;
244}
245
246void __cpuinit numa_set_node(int cpu, int node)
247{
248 int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
249
250 if (cpu_pda(cpu) && node != NUMA_NO_NODE)
251 cpu_pda(cpu)->nodenumber = node;
252
253 if (cpu_to_node_map)
254 cpu_to_node_map[cpu] = node;
255
256 else if (per_cpu_offset(cpu))
257 per_cpu(x86_cpu_to_node_map, cpu) = node;
258
259 else
260 pr_debug("Setting node for non-present cpu %d\n", cpu);
261}
262
263void __cpuinit numa_clear_node(int cpu)
264{
265 numa_set_node(cpu, NUMA_NO_NODE);
266}
267
268#ifndef CONFIG_DEBUG_PER_CPU_MAPS
269
270void __cpuinit numa_add_cpu(int cpu)
271{
272 cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
273}
274
275void __cpuinit numa_remove_cpu(int cpu)
276{
277 cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
278}
279
280#else /* CONFIG_DEBUG_PER_CPU_MAPS */
281
282/*
283 * --------- debug versions of the numa functions ---------
284 */
285static void __cpuinit numa_set_cpumask(int cpu, int enable)
286{
287 int node = cpu_to_node(cpu);
288 cpumask_t *mask;
289 char buf[64];
290
291 if (node_to_cpumask_map == NULL) {
292 printk(KERN_ERR "node_to_cpumask_map NULL\n");
293 dump_stack();
294 return;
295 }
296
297 mask = &node_to_cpumask_map[node];
298 if (enable)
299 cpu_set(cpu, *mask);
300 else
301 cpu_clear(cpu, *mask);
302
303 cpulist_scnprintf(buf, sizeof(buf), mask);
304 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
305 enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
306}
307
308void __cpuinit numa_add_cpu(int cpu)
309{
310 numa_set_cpumask(cpu, 1);
311}
312
313void __cpuinit numa_remove_cpu(int cpu)
314{
315 numa_set_cpumask(cpu, 0);
316}
317
318int cpu_to_node(int cpu)
319{
320 if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
321 printk(KERN_WARNING
322 "cpu_to_node(%d): usage too early!\n", cpu);
323 dump_stack();
324 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
325 }
326 return per_cpu(x86_cpu_to_node_map, cpu);
327}
328EXPORT_SYMBOL(cpu_to_node);
329
330/*
331 * Same function as cpu_to_node() but used if called before the
332 * per_cpu areas are setup.
333 */
334int early_cpu_to_node(int cpu)
335{
336 if (early_per_cpu_ptr(x86_cpu_to_node_map))
337 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
338
339 if (!per_cpu_offset(cpu)) {
340 printk(KERN_WARNING
341 "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
342 dump_stack();
343 return NUMA_NO_NODE;
344 }
345 return per_cpu(x86_cpu_to_node_map, cpu);
346}
347
348
349/* empty cpumask */
350static const cpumask_t cpu_mask_none;
351
352/*
353 * Returns a pointer to the bitmask of CPUs on Node 'node'.
354 */
355const cpumask_t *cpumask_of_node(int node)
356{
357 if (node_to_cpumask_map == NULL) {
358 printk(KERN_WARNING
359 "cpumask_of_node(%d): no node_to_cpumask_map!\n",
360 node);
361 dump_stack();
362 return (const cpumask_t *)&cpu_online_map;
363 }
364 if (node >= nr_node_ids) {
365 printk(KERN_WARNING
366 "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
367 node, nr_node_ids);
368 dump_stack();
369 return &cpu_mask_none;
370 }
371 return &node_to_cpumask_map[node];
372}
373EXPORT_SYMBOL(cpumask_of_node);
374
375/*
376 * Returns a bitmask of CPUs on Node 'node'.
377 *
378 * Side note: this function creates the returned cpumask on the stack
379 * so with a high NR_CPUS count, excessive stack space is used. The
380 * node_to_cpumask_ptr function should be used whenever possible.
381 */
382cpumask_t node_to_cpumask(int node)
383{
384 if (node_to_cpumask_map == NULL) {
385 printk(KERN_WARNING
386 "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
387 dump_stack();
388 return cpu_online_map;
389 }
390 if (node >= nr_node_ids) {
391 printk(KERN_WARNING
392 "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
393 node, nr_node_ids);
394 dump_stack();
395 return cpu_mask_none;
396 }
397 return node_to_cpumask_map[node];
398}
399EXPORT_SYMBOL(node_to_cpumask);
400
401/*
402 * --------- end of debug versions of the numa functions ---------
403 */
404
405#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
406
407#endif /* X86_64_NUMA */
408
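The rewritten setup_per_cpu_areas() allocates one page-aligned copy of the percpu payload for every possible CPU, records per_cpu_offset(cpu) as the distance from the reference copy, and then copies the early maps across. A toy model of the offset arithmetic, under the stated simplifications (malloc stands in for the boot allocator, one int stands in for the whole section):

/*
 * Toy model of per-CPU area setup: a private copy of a "reference"
 * blob per CPU, plus an offset table that turns a pointer into the
 * reference copy into a pointer into CPU n's copy.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NCPUS 4

static int reference_counter = 42;		/* lives in the "reference" blob */
static char *reference_start = (char *)&reference_counter;
static size_t blob_size = sizeof(reference_counter);

static long per_cpu_offset[NCPUS];

#define per_cpu(var, cpu) \
	(*(int *)((char *)&(var) + per_cpu_offset[(cpu)]))

int main(void)
{
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		char *copy = malloc(blob_size);		/* alloc_bootmem_pages() stand-in */
		memcpy(copy, reference_start, blob_size);
		per_cpu_offset[cpu] = copy - reference_start;
	}

	per_cpu(reference_counter, 2) = 99;		/* touches CPU 2's copy only */
	printf("cpu0=%d cpu2=%d reference=%d\n",
	       per_cpu(reference_counter, 0),
	       per_cpu(reference_counter, 2), reference_counter);
	return 0;
}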
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index df0587f24c54..7fc78b019815 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -51,24 +51,24 @@
51#endif 51#endif
52 52
53#define COPY(x) { \ 53#define COPY(x) { \
54 err |= __get_user(regs->x, &sc->x); \ 54 get_user_ex(regs->x, &sc->x); \
55} 55}
56 56
57#define COPY_SEG(seg) { \ 57#define COPY_SEG(seg) { \
58 unsigned short tmp; \ 58 unsigned short tmp; \
59 err |= __get_user(tmp, &sc->seg); \ 59 get_user_ex(tmp, &sc->seg); \
60 regs->seg = tmp; \ 60 regs->seg = tmp; \
61} 61}
62 62
63#define COPY_SEG_CPL3(seg) { \ 63#define COPY_SEG_CPL3(seg) { \
64 unsigned short tmp; \ 64 unsigned short tmp; \
65 err |= __get_user(tmp, &sc->seg); \ 65 get_user_ex(tmp, &sc->seg); \
66 regs->seg = tmp | 3; \ 66 regs->seg = tmp | 3; \
67} 67}
68 68
69#define GET_SEG(seg) { \ 69#define GET_SEG(seg) { \
70 unsigned short tmp; \ 70 unsigned short tmp; \
71 err |= __get_user(tmp, &sc->seg); \ 71 get_user_ex(tmp, &sc->seg); \
72 loadsegment(seg, tmp); \ 72 loadsegment(seg, tmp); \
73} 73}
74 74
@@ -83,45 +83,49 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
83 /* Always make any pending restarted system calls return -EINTR */ 83 /* Always make any pending restarted system calls return -EINTR */
84 current_thread_info()->restart_block.fn = do_no_restart_syscall; 84 current_thread_info()->restart_block.fn = do_no_restart_syscall;
85 85
86 get_user_try {
87
86#ifdef CONFIG_X86_32 88#ifdef CONFIG_X86_32
87 GET_SEG(gs); 89 GET_SEG(gs);
88 COPY_SEG(fs); 90 COPY_SEG(fs);
89 COPY_SEG(es); 91 COPY_SEG(es);
90 COPY_SEG(ds); 92 COPY_SEG(ds);
91#endif /* CONFIG_X86_32 */ 93#endif /* CONFIG_X86_32 */
92 94
93 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); 95 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
94 COPY(dx); COPY(cx); COPY(ip); 96 COPY(dx); COPY(cx); COPY(ip);
95 97
96#ifdef CONFIG_X86_64 98#ifdef CONFIG_X86_64
97 COPY(r8); 99 COPY(r8);
98 COPY(r9); 100 COPY(r9);
99 COPY(r10); 101 COPY(r10);
100 COPY(r11); 102 COPY(r11);
101 COPY(r12); 103 COPY(r12);
102 COPY(r13); 104 COPY(r13);
103 COPY(r14); 105 COPY(r14);
104 COPY(r15); 106 COPY(r15);
105#endif /* CONFIG_X86_64 */ 107#endif /* CONFIG_X86_64 */
106 108
107#ifdef CONFIG_X86_32 109#ifdef CONFIG_X86_32
108 COPY_SEG_CPL3(cs); 110 COPY_SEG_CPL3(cs);
109 COPY_SEG_CPL3(ss); 111 COPY_SEG_CPL3(ss);
110#else /* !CONFIG_X86_32 */ 112#else /* !CONFIG_X86_32 */
111 /* Kernel saves and restores only the CS segment register on signals, 113 /* Kernel saves and restores only the CS segment register on signals,
112 * which is the bare minimum needed to allow mixed 32/64-bit code. 114 * which is the bare minimum needed to allow mixed 32/64-bit code.
113 * App's signal handler can save/restore other segments if needed. */ 115 * App's signal handler can save/restore other segments if needed. */
114 COPY_SEG_CPL3(cs); 116 COPY_SEG_CPL3(cs);
115#endif /* CONFIG_X86_32 */ 117#endif /* CONFIG_X86_32 */
116 118
117 err |= __get_user(tmpflags, &sc->flags); 119 get_user_ex(tmpflags, &sc->flags);
118 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); 120 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
119 regs->orig_ax = -1; /* disable syscall checks */ 121 regs->orig_ax = -1; /* disable syscall checks */
122
123 get_user_ex(buf, &sc->fpstate);
124 err |= restore_i387_xstate(buf);
120 125
121 err |= __get_user(buf, &sc->fpstate); 126 get_user_ex(*pax, &sc->ax);
122 err |= restore_i387_xstate(buf); 127 } get_user_catch(err);
123 128
124 err |= __get_user(*pax, &sc->ax);
125 return err; 129 return err;
126} 130}
127 131
@@ -131,57 +135,60 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
131{ 135{
132 int err = 0; 136 int err = 0;
133 137
138 put_user_try {
139
134#ifdef CONFIG_X86_32 140#ifdef CONFIG_X86_32
135 { 141 {
136 unsigned int tmp; 142 unsigned int tmp;
137 143
138 savesegment(gs, tmp); 144 savesegment(gs, tmp);
139 err |= __put_user(tmp, (unsigned int __user *)&sc->gs); 145 put_user_ex(tmp, (unsigned int __user *)&sc->gs);
140 } 146 }
141 err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs); 147 put_user_ex(regs->fs, (unsigned int __user *)&sc->fs);
142 err |= __put_user(regs->es, (unsigned int __user *)&sc->es); 148 put_user_ex(regs->es, (unsigned int __user *)&sc->es);
143 err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds); 149 put_user_ex(regs->ds, (unsigned int __user *)&sc->ds);
144#endif /* CONFIG_X86_32 */ 150#endif /* CONFIG_X86_32 */
145 151
146 err |= __put_user(regs->di, &sc->di); 152 put_user_ex(regs->di, &sc->di);
147 err |= __put_user(regs->si, &sc->si); 153 put_user_ex(regs->si, &sc->si);
148 err |= __put_user(regs->bp, &sc->bp); 154 put_user_ex(regs->bp, &sc->bp);
149 err |= __put_user(regs->sp, &sc->sp); 155 put_user_ex(regs->sp, &sc->sp);
150 err |= __put_user(regs->bx, &sc->bx); 156 put_user_ex(regs->bx, &sc->bx);
151 err |= __put_user(regs->dx, &sc->dx); 157 put_user_ex(regs->dx, &sc->dx);
152 err |= __put_user(regs->cx, &sc->cx); 158 put_user_ex(regs->cx, &sc->cx);
153 err |= __put_user(regs->ax, &sc->ax); 159 put_user_ex(regs->ax, &sc->ax);
154#ifdef CONFIG_X86_64 160#ifdef CONFIG_X86_64
155 err |= __put_user(regs->r8, &sc->r8); 161 put_user_ex(regs->r8, &sc->r8);
156 err |= __put_user(regs->r9, &sc->r9); 162 put_user_ex(regs->r9, &sc->r9);
157 err |= __put_user(regs->r10, &sc->r10); 163 put_user_ex(regs->r10, &sc->r10);
158 err |= __put_user(regs->r11, &sc->r11); 164 put_user_ex(regs->r11, &sc->r11);
159 err |= __put_user(regs->r12, &sc->r12); 165 put_user_ex(regs->r12, &sc->r12);
160 err |= __put_user(regs->r13, &sc->r13); 166 put_user_ex(regs->r13, &sc->r13);
161 err |= __put_user(regs->r14, &sc->r14); 167 put_user_ex(regs->r14, &sc->r14);
162 err |= __put_user(regs->r15, &sc->r15); 168 put_user_ex(regs->r15, &sc->r15);
163#endif /* CONFIG_X86_64 */ 169#endif /* CONFIG_X86_64 */
164 170
165 err |= __put_user(current->thread.trap_no, &sc->trapno); 171 put_user_ex(current->thread.trap_no, &sc->trapno);
166 err |= __put_user(current->thread.error_code, &sc->err); 172 put_user_ex(current->thread.error_code, &sc->err);
167 err |= __put_user(regs->ip, &sc->ip); 173 put_user_ex(regs->ip, &sc->ip);
168#ifdef CONFIG_X86_32 174#ifdef CONFIG_X86_32
169 err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs); 175 put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
170 err |= __put_user(regs->flags, &sc->flags); 176 put_user_ex(regs->flags, &sc->flags);
171 err |= __put_user(regs->sp, &sc->sp_at_signal); 177 put_user_ex(regs->sp, &sc->sp_at_signal);
172 err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); 178 put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
173#else /* !CONFIG_X86_32 */ 179#else /* !CONFIG_X86_32 */
174 err |= __put_user(regs->flags, &sc->flags); 180 put_user_ex(regs->flags, &sc->flags);
175 err |= __put_user(regs->cs, &sc->cs); 181 put_user_ex(regs->cs, &sc->cs);
176 err |= __put_user(0, &sc->gs); 182 put_user_ex(0, &sc->gs);
177 err |= __put_user(0, &sc->fs); 183 put_user_ex(0, &sc->fs);
178#endif /* CONFIG_X86_32 */ 184#endif /* CONFIG_X86_32 */
179 185
180 err |= __put_user(fpstate, &sc->fpstate); 186 put_user_ex(fpstate, &sc->fpstate);
181 187
182 /* non-iBCS2 extensions.. */ 188 /* non-iBCS2 extensions.. */
183 err |= __put_user(mask, &sc->oldmask); 189 put_user_ex(mask, &sc->oldmask);
184 err |= __put_user(current->thread.cr2, &sc->cr2); 190 put_user_ex(current->thread.cr2, &sc->cr2);
191 } put_user_catch(err);
185 192
186 return err; 193 return err;
187} 194}
@@ -336,43 +343,41 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
336 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 343 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
337 return -EFAULT; 344 return -EFAULT;
338 345
339 err |= __put_user(sig, &frame->sig); 346 put_user_try {
340 err |= __put_user(&frame->info, &frame->pinfo); 347 put_user_ex(sig, &frame->sig);
341 err |= __put_user(&frame->uc, &frame->puc); 348 put_user_ex(&frame->info, &frame->pinfo);
342 err |= copy_siginfo_to_user(&frame->info, info); 349 put_user_ex(&frame->uc, &frame->puc);
343 if (err) 350 err |= copy_siginfo_to_user(&frame->info, info);
344 return -EFAULT;
345
346 /* Create the ucontext. */
347 if (cpu_has_xsave)
348 err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
349 else
350 err |= __put_user(0, &frame->uc.uc_flags);
351 err |= __put_user(0, &frame->uc.uc_link);
352 err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
353 err |= __put_user(sas_ss_flags(regs->sp),
354 &frame->uc.uc_stack.ss_flags);
355 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
356 err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
357 regs, set->sig[0]);
358 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
359 if (err)
360 return -EFAULT;
361 351
362 /* Set up to return from userspace. */ 352 /* Create the ucontext. */
363 restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); 353 if (cpu_has_xsave)
364 if (ka->sa.sa_flags & SA_RESTORER) 354 put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
365 restorer = ka->sa.sa_restorer; 355 else
366 err |= __put_user(restorer, &frame->pretcode); 356 put_user_ex(0, &frame->uc.uc_flags);
357 put_user_ex(0, &frame->uc.uc_link);
358 put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
359 put_user_ex(sas_ss_flags(regs->sp),
360 &frame->uc.uc_stack.ss_flags);
361 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
362 err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
363 regs, set->sig[0]);
364 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
365
366 /* Set up to return from userspace. */
367 restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
368 if (ka->sa.sa_flags & SA_RESTORER)
369 restorer = ka->sa.sa_restorer;
370 put_user_ex(restorer, &frame->pretcode);
367 371
368 /* 372 /*
369 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80 373 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
370 * 374 *
371 * WE DO NOT USE IT ANY MORE! It's only left here for historical 375 * WE DO NOT USE IT ANY MORE! It's only left here for historical
372 * reasons and because gdb uses it as a signature to notice 376 * reasons and because gdb uses it as a signature to notice
373 * signal handler stack frames. 377 * signal handler stack frames.
374 */ 378 */
375 err |= __put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode); 379 put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
380 } put_user_catch(err);
376 381
377 if (err) 382 if (err)
378 return -EFAULT; 383 return -EFAULT;
@@ -436,28 +441,30 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
436 return -EFAULT; 441 return -EFAULT;
437 } 442 }
438 443
439 /* Create the ucontext. */ 444 put_user_try {
440 if (cpu_has_xsave) 445 /* Create the ucontext. */
441 err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); 446 if (cpu_has_xsave)
442 else 447 put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
443 err |= __put_user(0, &frame->uc.uc_flags); 448 else
444 err |= __put_user(0, &frame->uc.uc_link); 449 put_user_ex(0, &frame->uc.uc_flags);
445 err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); 450 put_user_ex(0, &frame->uc.uc_link);
446 err |= __put_user(sas_ss_flags(regs->sp), 451 put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
447 &frame->uc.uc_stack.ss_flags); 452 put_user_ex(sas_ss_flags(regs->sp),
448 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); 453 &frame->uc.uc_stack.ss_flags);
449 err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]); 454 put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
450 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 455 err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
451 456 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
452 /* Set up to return from userspace. If provided, use a stub 457
453 already in userspace. */ 458 /* Set up to return from userspace. If provided, use a stub
454 /* x86-64 should always use SA_RESTORER. */ 459 already in userspace. */
455 if (ka->sa.sa_flags & SA_RESTORER) { 460 /* x86-64 should always use SA_RESTORER. */
456 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); 461 if (ka->sa.sa_flags & SA_RESTORER) {
457 } else { 462 put_user_ex(ka->sa.sa_restorer, &frame->pretcode);
458 /* could use a vstub here */ 463 } else {
459 return -EFAULT; 464 /* could use a vstub here */
460 } 465 err |= -EFAULT;
466 }
467 } put_user_catch(err);
461 468
462 if (err) 469 if (err)
463 return -EFAULT; 470 return -EFAULT;
@@ -509,31 +516,41 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
509 struct old_sigaction __user *oact) 516 struct old_sigaction __user *oact)
510{ 517{
511 struct k_sigaction new_ka, old_ka; 518 struct k_sigaction new_ka, old_ka;
512 int ret; 519 int ret = 0;
513 520
514 if (act) { 521 if (act) {
515 old_sigset_t mask; 522 old_sigset_t mask;
516 523
517 if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 524 if (!access_ok(VERIFY_READ, act, sizeof(*act)))
518 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
519 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
520 return -EFAULT; 525 return -EFAULT;
521 526
522 __get_user(new_ka.sa.sa_flags, &act->sa_flags); 527 get_user_try {
523 __get_user(mask, &act->sa_mask); 528 get_user_ex(new_ka.sa.sa_handler, &act->sa_handler);
529 get_user_ex(new_ka.sa.sa_flags, &act->sa_flags);
530 get_user_ex(mask, &act->sa_mask);
531 get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer);
532 } get_user_catch(ret);
533
534 if (ret)
535 return -EFAULT;
524 siginitset(&new_ka.sa.sa_mask, mask); 536 siginitset(&new_ka.sa.sa_mask, mask);
525 } 537 }
526 538
527 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 539 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
528 540
529 if (!ret && oact) { 541 if (!ret && oact) {
530 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 542 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
531 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
532 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
533 return -EFAULT; 543 return -EFAULT;
534 544
535 __put_user(old_ka.sa.sa_flags, &oact->sa_flags); 545 put_user_try {
536 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); 546 put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler);
547 put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags);
548 put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
549 put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer);
550 } put_user_catch(ret);
551
552 if (ret)
553 return -EFAULT;
537 } 554 }
538 555
539 return ret; 556 return ret;
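The signal.c conversion batches long runs of __put_user()/__get_user() calls into put_user_try { put_user_ex(...); } put_user_catch(err) blocks, so the error is collected once per block rather than OR-ed into err on every access. Below is a very rough userspace approximation of only the control flow; the real macros rely on the exception-table machinery, which this sketch does not model, and the sim_* names are invented.

/*
 * Approximation of the try/catch-style user-access macros used above.
 * A simple error flag models the control flow, nothing more.
 */
#include <stdio.h>

#define sim_put_user_try	{ int __err = 0;
#define sim_put_user_ex(x, ptr)	do { if (store(ptr, x)) __err = -1; } while (0);
#define sim_put_user_catch(err)	(err) |= __err; }

static int store(long *ptr, long val)
{
	if (!ptr)
		return -1;	/* pretend the access faulted */
	*ptr = val;
	return 0;
}

int main(void)
{
	long slot = 0;
	int err = 0;

	sim_put_user_try {
		sim_put_user_ex(123, &slot);
		sim_put_user_ex(456, NULL);	/* "faults"; the error is recorded */
	} sim_put_user_catch(err);

	printf("slot=%ld err=%d\n", slot, err);
	return 0;
}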
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index e6faa3316bd2..eaaffae31cc0 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -2,7 +2,7 @@
2 * Intel SMP support routines. 2 * Intel SMP support routines.
3 * 3 *
4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk> 4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
5 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com> 5 * (c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
6 * (c) 2002,2003 Andi Kleen, SuSE Labs. 6 * (c) 2002,2003 Andi Kleen, SuSE Labs.
7 * 7 *
8 * i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com> 8 * i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
@@ -26,8 +26,7 @@
26#include <asm/tlbflush.h> 26#include <asm/tlbflush.h>
27#include <asm/mmu_context.h> 27#include <asm/mmu_context.h>
28#include <asm/proto.h> 28#include <asm/proto.h>
29#include <mach_ipi.h> 29#include <asm/genapic.h>
30#include <mach_apic.h>
31/* 30/*
32 * Some notes on x86 processor bugs affecting SMP operation: 31 * Some notes on x86 processor bugs affecting SMP operation:
33 * 32 *
@@ -118,12 +117,12 @@ static void native_smp_send_reschedule(int cpu)
118 WARN_ON(1); 117 WARN_ON(1);
119 return; 118 return;
120 } 119 }
121 send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); 120 apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
122} 121}
123 122
124void native_send_call_func_single_ipi(int cpu) 123void native_send_call_func_single_ipi(int cpu)
125{ 124{
126 send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR); 125 apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
127} 126}
128 127
129void native_send_call_func_ipi(const struct cpumask *mask) 128void native_send_call_func_ipi(const struct cpumask *mask)
@@ -131,7 +130,7 @@ void native_send_call_func_ipi(const struct cpumask *mask)
131 cpumask_var_t allbutself; 130 cpumask_var_t allbutself;
132 131
133 if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) { 132 if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
134 send_IPI_mask(mask, CALL_FUNCTION_VECTOR); 133 apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
135 return; 134 return;
136 } 135 }
137 136
@@ -140,9 +139,9 @@ void native_send_call_func_ipi(const struct cpumask *mask)
140 139
141 if (cpumask_equal(mask, allbutself) && 140 if (cpumask_equal(mask, allbutself) &&
142 cpumask_equal(cpu_online_mask, cpu_callout_mask)) 141 cpumask_equal(cpu_online_mask, cpu_callout_mask))
143 send_IPI_allbutself(CALL_FUNCTION_VECTOR); 142 apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
144 else 143 else
145 send_IPI_mask(mask, CALL_FUNCTION_VECTOR); 144 apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
146 145
147 free_cpumask_var(allbutself); 146 free_cpumask_var(allbutself);
148} 147}
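native_send_call_func_ipi() keeps its old decision but routes it through the apic driver: when the requested mask is exactly "all online CPUs except myself" (and every online CPU has called in), one all-but-self broadcast replaces a per-destination mask send. A toy sketch of that shortcut with plain bitmasks, hypothetical names only:

/*
 * Illustration of the broadcast shortcut: broadcast when the mask
 * covers everyone but the sender, otherwise send a targeted IPI.
 */
#include <stdio.h>

static unsigned long online_mask = 0x0f;	/* CPUs 0-3 online */
static int this_cpu = 1;

static void send_ipi(unsigned long mask, const char *how)
{
	printf("%s IPI, mask %#lx\n", how, mask);
}

static void call_func_ipi(unsigned long mask)
{
	unsigned long allbutself = online_mask & ~(1UL << this_cpu);

	if (mask == allbutself)
		send_ipi(allbutself, "all-but-self broadcast");
	else
		send_ipi(mask, "targeted mask");
}

int main(void)
{
	call_func_ipi(0x0d);	/* CPUs 0,2,3 -> broadcast path */
	call_func_ipi(0x04);	/* CPU 2 only  -> targeted path  */
	return 0;
}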
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index bb1a3b1fc87f..af57f88186e7 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -2,7 +2,7 @@
2 * x86 SMP booting functions 2 * x86 SMP booting functions
3 * 3 *
4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk> 4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
5 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> 5 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
6 * Copyright 2001 Andi Kleen, SuSE Labs. 6 * Copyright 2001 Andi Kleen, SuSE Labs.
7 * 7 *
8 * Much of the core SMP work is based on previous work by Thomas Radke, to 8 * Much of the core SMP work is based on previous work by Thomas Radke, to
@@ -53,7 +53,6 @@
53#include <asm/nmi.h> 53#include <asm/nmi.h>
54#include <asm/irq.h> 54#include <asm/irq.h>
55#include <asm/idle.h> 55#include <asm/idle.h>
56#include <asm/smp.h>
57#include <asm/trampoline.h> 56#include <asm/trampoline.h>
58#include <asm/cpu.h> 57#include <asm/cpu.h>
59#include <asm/numa.h> 58#include <asm/numa.h>
@@ -63,11 +62,11 @@
63#include <asm/vmi.h> 62#include <asm/vmi.h>
64#include <asm/genapic.h> 63#include <asm/genapic.h>
65#include <asm/setup.h> 64#include <asm/setup.h>
65#include <asm/uv/uv.h>
66#include <linux/mc146818rtc.h> 66#include <linux/mc146818rtc.h>
67 67
68#include <mach_apic.h> 68#include <asm/genapic.h>
69#include <mach_wakecpu.h> 69#include <asm/smpboot_hooks.h>
70#include <smpboot_hooks.h>
71 70
72#ifdef CONFIG_X86_32 71#ifdef CONFIG_X86_32
73u8 apicid_2_node[MAX_APICID]; 72u8 apicid_2_node[MAX_APICID];
@@ -163,7 +162,7 @@ static void map_cpu_to_logical_apicid(void)
163{ 162{
164 int cpu = smp_processor_id(); 163 int cpu = smp_processor_id();
165 int apicid = logical_smp_processor_id(); 164 int apicid = logical_smp_processor_id();
166 int node = apicid_to_node(apicid); 165 int node = apic->apicid_to_node(apicid);
167 166
168 if (!node_online(node)) 167 if (!node_online(node))
169 node = first_online_node; 168 node = first_online_node;
@@ -196,7 +195,8 @@ static void __cpuinit smp_callin(void)
196 * our local APIC. We have to wait for the IPI or we'll 195 * our local APIC. We have to wait for the IPI or we'll
197 * lock up on an APIC access. 196 * lock up on an APIC access.
198 */ 197 */
199 wait_for_init_deassert(&init_deasserted); 198 if (apic->wait_for_init_deassert)
199 apic->wait_for_init_deassert(&init_deasserted);
200 200
201 /* 201 /*
202 * (This works even if the APIC is not enabled.) 202 * (This works even if the APIC is not enabled.)
@@ -243,7 +243,8 @@ static void __cpuinit smp_callin(void)
243 */ 243 */
244 244
245 pr_debug("CALLIN, before setup_local_APIC().\n"); 245 pr_debug("CALLIN, before setup_local_APIC().\n");
246 smp_callin_clear_local_apic(); 246 if (apic->smp_callin_clear_local_apic)
247 apic->smp_callin_clear_local_apic();
247 setup_local_APIC(); 248 setup_local_APIC();
248 end_local_APIC_setup(); 249 end_local_APIC_setup();
249 map_cpu_to_logical_apicid(); 250 map_cpu_to_logical_apicid();
@@ -583,7 +584,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
583 /* Target chip */ 584 /* Target chip */
584 /* Boot on the stack */ 585 /* Boot on the stack */
585 /* Kick the second */ 586 /* Kick the second */
586 apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid); 587 apic_icr_write(APIC_DM_NMI | apic->dest_logical, logical_apicid);
587 588
588 pr_debug("Waiting for send to finish...\n"); 589 pr_debug("Waiting for send to finish...\n");
589 send_status = safe_apic_wait_icr_idle(); 590 send_status = safe_apic_wait_icr_idle();
@@ -745,57 +746,11 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
745 complete(&c_idle->done); 746 complete(&c_idle->done);
746} 747}
747 748
748#ifdef CONFIG_X86_64
749
750/* __ref because it's safe to call free_bootmem when after_bootmem == 0. */
751static void __ref free_bootmem_pda(struct x8664_pda *oldpda)
752{
753 if (!after_bootmem)
754 free_bootmem((unsigned long)oldpda, sizeof(*oldpda));
755}
756
757/*
758 * Allocate node local memory for the AP pda.
759 *
760 * Must be called after the _cpu_pda pointer table is initialized.
761 */
762int __cpuinit get_local_pda(int cpu)
763{
764 struct x8664_pda *oldpda, *newpda;
765 unsigned long size = sizeof(struct x8664_pda);
766 int node = cpu_to_node(cpu);
767
768 if (cpu_pda(cpu) && !cpu_pda(cpu)->in_bootmem)
769 return 0;
770
771 oldpda = cpu_pda(cpu);
772 newpda = kmalloc_node(size, GFP_ATOMIC, node);
773 if (!newpda) {
774 printk(KERN_ERR "Could not allocate node local PDA "
775 "for CPU %d on node %d\n", cpu, node);
776
777 if (oldpda)
778 return 0; /* have a usable pda */
779 else
780 return -1;
781 }
782
783 if (oldpda) {
784 memcpy(newpda, oldpda, size);
785 free_bootmem_pda(oldpda);
786 }
787
788 newpda->in_bootmem = 0;
789 cpu_pda(cpu) = newpda;
790 return 0;
791}
792#endif /* CONFIG_X86_64 */
793
794static int __cpuinit do_boot_cpu(int apicid, int cpu) 749static int __cpuinit do_boot_cpu(int apicid, int cpu)
795/* 750/*
796 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad 751 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
797 * (ie clustered apic addressing mode), this is a LOGICAL apic ID. 752 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
798 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu. 753 * Returns zero if CPU booted OK, else error code from ->wakeup_cpu.
799 */ 754 */
800{ 755{
801 unsigned long boot_error = 0; 756 unsigned long boot_error = 0;
@@ -808,16 +763,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
808 }; 763 };
809 INIT_WORK(&c_idle.work, do_fork_idle); 764 INIT_WORK(&c_idle.work, do_fork_idle);
810 765
811#ifdef CONFIG_X86_64
812 /* Allocate node local memory for AP pdas */
813 if (cpu > 0) {
814 boot_error = get_local_pda(cpu);
815 if (boot_error)
816 goto restore_state;
817 /* if can't get pda memory, can't start cpu */
818 }
819#endif
820
821 alternatives_smp_switch(1); 766 alternatives_smp_switch(1);
822 767
823 c_idle.idle = get_idle_for_cpu(cpu); 768 c_idle.idle = get_idle_for_cpu(cpu);
@@ -847,14 +792,16 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
847 792
848 set_idle_for_cpu(cpu, c_idle.idle); 793 set_idle_for_cpu(cpu, c_idle.idle);
849do_rest: 794do_rest:
850#ifdef CONFIG_X86_32
851 per_cpu(current_task, cpu) = c_idle.idle; 795 per_cpu(current_task, cpu) = c_idle.idle;
852 init_gdt(cpu); 796#ifdef CONFIG_X86_32
853 /* Stack for startup_32 can be just as for start_secondary onwards */ 797 /* Stack for startup_32 can be just as for start_secondary onwards */
854 irq_ctx_init(cpu); 798 irq_ctx_init(cpu);
855#else 799#else
856 cpu_pda(cpu)->pcurrent = c_idle.idle;
857 clear_tsk_thread_flag(c_idle.idle, TIF_FORK); 800 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
801 initial_gs = per_cpu_offset(cpu);
802 per_cpu(kernel_stack, cpu) =
803 (unsigned long)task_stack_page(c_idle.idle) -
804 KERNEL_STACK_OFFSET + THREAD_SIZE;
858#endif 805#endif
859 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); 806 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
860 initial_code = (unsigned long)start_secondary; 807 initial_code = (unsigned long)start_secondary;
@@ -878,7 +825,8 @@ do_rest:
878 825
879 pr_debug("Setting warm reset code and vector.\n"); 826 pr_debug("Setting warm reset code and vector.\n");
880 827
881 store_NMI_vector(&nmi_high, &nmi_low); 828 if (apic->store_NMI_vector)
829 apic->store_NMI_vector(&nmi_high, &nmi_low);
882 830
883 smpboot_setup_warm_reset_vector(start_ip); 831 smpboot_setup_warm_reset_vector(start_ip);
884 /* 832 /*
@@ -893,7 +841,7 @@ do_rest:
893 /* 841 /*
894 * Starting actual IPI sequence... 842 * Starting actual IPI sequence...
895 */ 843 */
896 boot_error = wakeup_secondary_cpu(apicid, start_ip); 844 boot_error = apic->wakeup_cpu(apicid, start_ip);
897 845
898 if (!boot_error) { 846 if (!boot_error) {
899 /* 847 /*
@@ -927,13 +875,11 @@ do_rest:
927 else 875 else
928 /* trampoline code not run */ 876 /* trampoline code not run */
929 printk(KERN_ERR "Not responding.\n"); 877 printk(KERN_ERR "Not responding.\n");
930 if (get_uv_system_type() != UV_NON_UNIQUE_APIC) 878 if (apic->inquire_remote_apic)
931 inquire_remote_apic(apicid); 879 apic->inquire_remote_apic(apicid);
932 } 880 }
933 } 881 }
934#ifdef CONFIG_X86_64 882
935restore_state:
936#endif
937 if (boot_error) { 883 if (boot_error) {
938 /* Try to put things back the way they were before ... */ 884 /* Try to put things back the way they were before ... */
939 numa_remove_cpu(cpu); /* was set by numa_add_cpu */ 885 numa_remove_cpu(cpu); /* was set by numa_add_cpu */
@@ -961,7 +907,7 @@ restore_state:
961 907
962int __cpuinit native_cpu_up(unsigned int cpu) 908int __cpuinit native_cpu_up(unsigned int cpu)
963{ 909{
964 int apicid = cpu_present_to_apicid(cpu); 910 int apicid = apic->cpu_present_to_apicid(cpu);
965 unsigned long flags; 911 unsigned long flags;
966 int err; 912 int err;
967 913
@@ -1054,14 +1000,14 @@ static int __init smp_sanity_check(unsigned max_cpus)
1054{ 1000{
1055 preempt_disable(); 1001 preempt_disable();
1056 1002
1057#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32) 1003#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
1058 if (def_to_bigsmp && nr_cpu_ids > 8) { 1004 if (def_to_bigsmp && nr_cpu_ids > 8) {
1059 unsigned int cpu; 1005 unsigned int cpu;
1060 unsigned nr; 1006 unsigned nr;
1061 1007
1062 printk(KERN_WARNING 1008 printk(KERN_WARNING
1063 "More than 8 CPUs detected - skipping them.\n" 1009 "More than 8 CPUs detected - skipping them.\n"
1064 "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n"); 1010 "Use CONFIG_X86_BIGSMP.\n");
1065 1011
1066 nr = 0; 1012 nr = 0;
1067 for_each_present_cpu(cpu) { 1013 for_each_present_cpu(cpu) {
@@ -1107,7 +1053,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
1107 * Should not be necessary because the MP table should list the boot 1053 * Should not be necessary because the MP table should list the boot
1108 * CPU too, but we do it for the sake of robustness anyway. 1054 * CPU too, but we do it for the sake of robustness anyway.
1109 */ 1055 */
1110 if (!check_phys_apicid_present(boot_cpu_physical_apicid)) { 1056 if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
1111 printk(KERN_NOTICE 1057 printk(KERN_NOTICE
1112 "weird, boot CPU (#%d) not listed by the BIOS.\n", 1058 "weird, boot CPU (#%d) not listed by the BIOS.\n",
1113 boot_cpu_physical_apicid); 1059 boot_cpu_physical_apicid);
@@ -1125,6 +1071,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
1125 printk(KERN_ERR "... forcing use of dummy APIC emulation." 1071 printk(KERN_ERR "... forcing use of dummy APIC emulation."
1126 "(tell your hw vendor)\n"); 1072 "(tell your hw vendor)\n");
1127 smpboot_clear_io_apic(); 1073 smpboot_clear_io_apic();
1074 arch_disable_smp_support();
1128 return -1; 1075 return -1;
1129 } 1076 }
1130 1077
@@ -1183,7 +1130,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1183 1130
1184#ifdef CONFIG_X86_64 1131#ifdef CONFIG_X86_64
1185 enable_IR_x2apic(); 1132 enable_IR_x2apic();
1186 setup_apic_routing(); 1133 default_setup_apic_routing();
1187#endif 1134#endif
1188 1135
1189 if (smp_sanity_check(max_cpus) < 0) { 1136 if (smp_sanity_check(max_cpus) < 0) {
@@ -1218,7 +1165,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1218 1165
1219 map_cpu_to_logical_apicid(); 1166 map_cpu_to_logical_apicid();
1220 1167
1221 setup_portio_remap(); 1168 if (apic->setup_portio_remap)
1169 apic->setup_portio_remap();
1222 1170
1223 smpboot_setup_io_apic(); 1171 smpboot_setup_io_apic();
1224 /* 1172 /*
@@ -1240,10 +1188,7 @@ out:
1240void __init native_smp_prepare_boot_cpu(void) 1188void __init native_smp_prepare_boot_cpu(void)
1241{ 1189{
1242 int me = smp_processor_id(); 1190 int me = smp_processor_id();
1243#ifdef CONFIG_X86_32 1191 switch_to_new_gdt(me);
1244 init_gdt(me);
1245#endif
1246 switch_to_new_gdt();
1247 /* already set me in cpu_online_mask in boot_cpu_init() */ 1192 /* already set me in cpu_online_mask in boot_cpu_init() */
1248 cpumask_set_cpu(me, cpu_callout_mask); 1193 cpumask_set_cpu(me, cpu_callout_mask);
1249 per_cpu(cpu_state, me) = CPU_ONLINE; 1194 per_cpu(cpu_state, me) = CPU_ONLINE;
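Throughout smpboot.c the sub-arch specific steps become optional hooks: wait_for_init_deassert, smp_callin_clear_local_apic, store_NMI_vector and inquire_remote_apic are called only when the driver provides them, instead of being stubbed out by per-mach headers. A short sketch of that guarded-optional-hook style; the driver names and behaviour are illustrative only.

/*
 * Sketch of the "optional hook" style used in smp_callin() and
 * do_boot_cpu() above: a driver may leave a callback NULL and the
 * core simply skips that step.
 */
#include <stdio.h>
#include <stddef.h>

struct boot_ops {
	void (*wait_for_init_deassert)(void);
	void (*inquire_remote_apic)(int apicid);
};

static void example_wait(void) { printf("waiting for INIT deassert\n"); }

static struct boot_ops default_ops = { 0 };			/* no extras */
static struct boot_ops quirky_ops  = { .wait_for_init_deassert = example_wait };

static void callin(const struct boot_ops *ops)
{
	if (ops->wait_for_init_deassert)	/* optional step */
		ops->wait_for_init_deassert();
	printf("set up local APIC\n");
	if (ops->inquire_remote_apic)		/* also optional */
		ops->inquire_remote_apic(0);
}

int main(void)
{
	callin(&default_ops);
	callin(&quirky_ops);
	return 0;
}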
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
deleted file mode 100644
index 397e309839dd..000000000000
--- a/arch/x86/kernel/smpcommon.c
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * SMP stuff which is common to all sub-architectures.
3 */
4#include <linux/module.h>
5#include <asm/smp.h>
6
7#ifdef CONFIG_X86_32
8DEFINE_PER_CPU(unsigned long, this_cpu_off);
9EXPORT_PER_CPU_SYMBOL(this_cpu_off);
10
11/*
12 * Initialize the CPU's GDT. This is either the boot CPU doing itself
13 * (still using the master per-cpu area), or a CPU doing it for a
14 * secondary which will soon come up.
15 */
16__cpuinit void init_gdt(int cpu)
17{
18 struct desc_struct gdt;
19
20 pack_descriptor(&gdt, __per_cpu_offset[cpu], 0xFFFFF,
21 0x2 | DESCTYPE_S, 0x8);
22 gdt.s = 1;
23
24 write_gdt_entry(get_cpu_gdt_table(cpu),
25 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
26
27 per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
28 per_cpu(cpu_number, cpu) = cpu;
29}
30#endif
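The deleted smpcommon.c survives as setup_percpu_segment() in setup_percpu.c: on 32-bit every CPU gets a GDT_ENTRY_PERCPU descriptor whose base is that CPU's per-cpu offset, so the same segment-relative access resolves to a different copy on each CPU. A toy model of "segment base + variable offset" addressing with plain pointer arithmetic; the names are illustrative and no real descriptors are built.

/*
 * Toy model of the 32-bit per-CPU segment: an explicit base pointer
 * plays the role of the segment base loaded from the GDT.
 */
#include <stdio.h>

#define NCPUS 3

static int counter_copies[NCPUS];		/* per-CPU copies of one variable */

struct fake_segment { char *base; };		/* stands in for a GDT descriptor */

static int *seg_relative(struct fake_segment *fs, size_t offset)
{
	return (int *)(fs->base + offset);	/* "%fs:offset" analogue */
}

int main(void)
{
	struct fake_segment fs[NCPUS];

	for (int cpu = 0; cpu < NCPUS; cpu++)
		fs[cpu].base = (char *)&counter_copies[cpu];	/* per-CPU base */

	*seg_relative(&fs[0], 0) = 10;		/* same offset (0) ... */
	*seg_relative(&fs[2], 0) = 30;		/* ... different copies */

	printf("cpu0=%d cpu1=%d cpu2=%d\n",
	       counter_copies[0], counter_copies[1], counter_copies[2]);
	return 0;
}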
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 10786af95545..f7bddc2e37d1 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Stack trace management functions 2 * Stack trace management functions
3 * 3 *
4 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 4 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 */ 5 */
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/stacktrace.h> 7#include <linux/stacktrace.h>
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c
index 7b987852e876..1e733eff9b33 100644
--- a/arch/x86/kernel/summit_32.c
+++ b/arch/x86/kernel/summit_32.c
@@ -30,8 +30,364 @@
30#include <linux/init.h> 30#include <linux/init.h>
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/bios_ebda.h> 32#include <asm/bios_ebda.h>
33#include <asm/summit/mpparse.h>
34 33
34/*
35 * APIC driver for the IBM "Summit" chipset.
36 */
37#define APIC_DEFINITION 1
38#include <linux/threads.h>
39#include <linux/cpumask.h>
40#include <asm/mpspec.h>
41#include <asm/apic.h>
42#include <asm/smp.h>
43#include <asm/genapic.h>
44#include <asm/fixmap.h>
45#include <asm/apicdef.h>
46#include <asm/ipi.h>
47#include <linux/kernel.h>
48#include <linux/string.h>
49#include <linux/init.h>
50#include <linux/gfp.h>
51#include <linux/smp.h>
52
53static inline unsigned summit_get_apic_id(unsigned long x)
54{
55 return (x >> 24) & 0xFF;
56}
57
58static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
59{
60 default_send_IPI_mask_sequence_logical(mask, vector);
61}
62
63static inline void summit_send_IPI_allbutself(int vector)
64{
65 cpumask_t mask = cpu_online_map;
66 cpu_clear(smp_processor_id(), mask);
67
68 if (!cpus_empty(mask))
69 summit_send_IPI_mask(&mask, vector);
70}
71
72static inline void summit_send_IPI_all(int vector)
73{
74 summit_send_IPI_mask(&cpu_online_map, vector);
75}
76
77#include <asm/tsc.h>
78
79extern int use_cyclone;
80
81#ifdef CONFIG_X86_SUMMIT_NUMA
82extern void setup_summit(void);
83#else
84#define setup_summit() {}
85#endif
86
87static inline int
88summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
89{
90 if (!strncmp(oem, "IBM ENSW", 8) &&
91 (!strncmp(productid, "VIGIL SMP", 9)
92 || !strncmp(productid, "EXA", 3)
93 || !strncmp(productid, "RUTHLESS SMP", 12))){
94 mark_tsc_unstable("Summit based system");
95 use_cyclone = 1; /*enable cyclone-timer*/
96 setup_summit();
97 return 1;
98 }
99 return 0;
100}
101
102/* Hook from generic ACPI tables.c */
103static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
104{
105 if (!strncmp(oem_id, "IBM", 3) &&
106 (!strncmp(oem_table_id, "SERVIGIL", 8)
107 || !strncmp(oem_table_id, "EXA", 3))){
108 mark_tsc_unstable("Summit based system");
109 use_cyclone = 1; /*enable cyclone-timer*/
110 setup_summit();
111 return 1;
112 }
113 return 0;
114}
115
116struct rio_table_hdr {
117 unsigned char version; /* Version number of this data structure */
118 /* Version 3 adds chassis_num & WP_index */
119 unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */
120 unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */
121} __attribute__((packed));
122
123struct scal_detail {
124 unsigned char node_id; /* Scalability Node ID */
125 unsigned long CBAR; /* Address of 1MB register space */
126 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
127 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
128 unsigned char port1node; /* Node ID port connected to: 0xFF = None */
129 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
130 unsigned char port2node; /* Node ID port connected to: 0xFF = None */
131 unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */
132 unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */
133} __attribute__((packed));
134
135struct rio_detail {
136 unsigned char node_id; /* RIO Node ID */
137 unsigned long BBAR; /* Address of 1MB register space */
138 unsigned char type; /* Type of device */
139 unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/
140 /* For CYC: Node ID of Twister that owns this CYC */
141 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
142 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
143 unsigned char port1node; /* Node ID port connected to: 0xFF=None */
144 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
145 unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */
146 /* For CYC: 0 */
147 unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */
148 /* = 0 : the XAPIC is not used, ie:*/
149 /* ints fwded to another XAPIC */
150 /* Bits1:7 Reserved */
151 /* For CYC: Bits0:7 Reserved */
152 unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */
153 /* lower slot numbers/PCI bus numbers */
154 /* For CYC: No meaning */
155 unsigned char chassis_num; /* 1 based Chassis number */
156 /* For LookOut WPEGs this field indicates the */
157 /* Expansion Chassis #, enumerated from Boot */
158 /* Node WPEG external port, then Boot Node CYC */
159 /* external port, then Next Vigil chassis WPEG */
160 /* external port, etc. */
161 /* Shared Lookouts have only 1 chassis number (the */
162 /* first one assigned) */
163} __attribute__((packed));
164
165
166typedef enum {
167 CompatTwister = 0, /* Compatibility Twister */
168 AltTwister = 1, /* Alternate Twister of internal 8-way */
169 CompatCyclone = 2, /* Compatibility Cyclone */
170 AltCyclone = 3, /* Alternate Cyclone of internal 8-way */
171 CompatWPEG = 4, /* Compatibility WPEG */
172 AltWPEG = 5, /* Second Planar WPEG */
173 LookOutAWPEG = 6, /* LookOut WPEG */
174 LookOutBWPEG = 7, /* LookOut WPEG */
175} node_type;
176
177static inline int is_WPEG(struct rio_detail *rio){
178 return (rio->type == CompatWPEG || rio->type == AltWPEG ||
179 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
180}
181
182
183/* In clustered mode, the high nibble of APIC ID is a cluster number.
184 * The low nibble is a 4-bit bitmap. */
185#define XAPIC_DEST_CPUS_SHIFT 4
186#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
187#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
188
189#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
190
191static inline const cpumask_t *summit_target_cpus(void)
192{
193 /* CPU_MASK_ALL (0xff) has undefined behaviour with
194 * dest_LowestPrio mode logical clustered apic interrupt routing
195 * Just start on cpu 0. IRQ balancing will spread load
196 */
197 return &cpumask_of_cpu(0);
198}
199
200static inline unsigned long
201summit_check_apicid_used(physid_mask_t bitmap, int apicid)
202{
203 return 0;
204}
205
206/* we don't use the phys_cpu_present_map to indicate apicid presence */
207static inline unsigned long summit_check_apicid_present(int bit)
208{
209 return 1;
210}
211
212#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
213
214extern u8 cpu_2_logical_apicid[];
215
216static inline void summit_init_apic_ldr(void)
217{
218 unsigned long val, id;
219 int count = 0;
220 u8 my_id = (u8)hard_smp_processor_id();
221 u8 my_cluster = (u8)apicid_cluster(my_id);
222#ifdef CONFIG_SMP
223 u8 lid;
224 int i;
225
226 /* Create logical APIC IDs by counting CPUs already in cluster. */
227 for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
228 lid = cpu_2_logical_apicid[i];
229 if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
230 ++count;
231 }
232#endif
233 /* We only have a 4 wide bitmap in cluster mode. If a deranged
234 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
235 BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
236 id = my_cluster | (1UL << count);
237 apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
238 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
239 val |= SET_APIC_LOGICAL_ID(id);
240 apic_write(APIC_LDR, val);
241}
242
243static inline int summit_apic_id_registered(void)
244{
245 return 1;
246}
247
248static inline void summit_setup_apic_routing(void)
249{
250 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
251 nr_ioapics);
252}
253
254static inline int summit_apicid_to_node(int logical_apicid)
255{
256#ifdef CONFIG_SMP
257 return apicid_2_node[hard_smp_processor_id()];
258#else
259 return 0;
260#endif
261}
262
263/* Mapping from cpu number to logical apicid */
264static inline int summit_cpu_to_logical_apicid(int cpu)
265{
266#ifdef CONFIG_SMP
267 if (cpu >= nr_cpu_ids)
268 return BAD_APICID;
269 return (int)cpu_2_logical_apicid[cpu];
270#else
271 return logical_smp_processor_id();
272#endif
273}
274
275static inline int summit_cpu_present_to_apicid(int mps_cpu)
276{
277 if (mps_cpu < nr_cpu_ids)
278 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
279 else
280 return BAD_APICID;
281}
282
283static inline physid_mask_t
284summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
285{
286 /* For clustered we don't have a good way to do this yet - hack */
287 return physids_promote(0x0F);
288}
289
290static inline physid_mask_t summit_apicid_to_cpu_present(int apicid)
291{
292 return physid_mask_of_physid(0);
293}
294
295static inline void summit_setup_portio_remap(void)
296{
297}
298
299static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
300{
301 return 1;
302}
303
304static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
305{
306 int cpus_found = 0;
307 int num_bits_set;
308 int apicid;
309 int cpu;
310
311 num_bits_set = cpus_weight(*cpumask);
312 /* Return id to all */
313 if (num_bits_set >= nr_cpu_ids)
314 return 0xFF;
315 /*
316	 * The cpus in the mask must all be on the same apicid cluster. If they
317	 * are not on the same cluster, return the default value of target_cpus():
318 */
319 cpu = first_cpu(*cpumask);
320 apicid = summit_cpu_to_logical_apicid(cpu);
321
322 while (cpus_found < num_bits_set) {
323 if (cpu_isset(cpu, *cpumask)) {
324 int new_apicid = summit_cpu_to_logical_apicid(cpu);
325
326 if (apicid_cluster(apicid) !=
327 apicid_cluster(new_apicid)) {
328				printk("%s: Not a valid mask!\n", __func__);
329
330 return 0xFF;
331 }
332 apicid = apicid | new_apicid;
333 cpus_found++;
334 }
335 cpu++;
336 }
337 return apicid;
338}
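The invariant the loop above enforces can be stated as a tiny sketch (illustrative only; the helper name is made up and the IDs are hypothetical):

/* Illustrative only: two logical IDs may be OR-merged into one destination
 * only if they share a cluster, otherwise the function above gives up (0xFF). */
static int same_cluster(unsigned int a, unsigned int b)
{
	return (a & XAPIC_DEST_CLUSTER_MASK) == (b & XAPIC_DEST_CLUSTER_MASK);
}
/* e.g. same_cluster(0x21, 0x22) holds, so 0x21 | 0x22 == 0x23 addresses both CPUs;
 * same_cluster(0x21, 0x31) does not, so 0xFF (broadcast) is returned instead. */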
339
340static inline unsigned int
341summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
342 const struct cpumask *andmask)
343{
344 int apicid = summit_cpu_to_logical_apicid(0);
345 cpumask_var_t cpumask;
346
347 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
348 return apicid;
349
350 cpumask_and(cpumask, inmask, andmask);
351 cpumask_and(cpumask, cpumask, cpu_online_mask);
352 apicid = summit_cpu_mask_to_apicid(cpumask);
353
354 free_cpumask_var(cpumask);
355
356 return apicid;
357}
358
359/*
360 * cpuid returns the value latched in the HW at reset, not the APIC ID
361 * register's value. For any box whose BIOS changes APIC IDs, like
362 * clustered APIC systems, we must use hard_smp_processor_id.
363 *
364 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
365 */
366static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb)
367{
368 return hard_smp_processor_id() >> index_msb;
369}
370
371static int probe_summit(void)
372{
373 /* probed later in mptable/ACPI hooks */
374 return 0;
375}
376
377static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
378{
379 /* Careful. Some cpus do not strictly honor the set of cpus
380 * specified in the interrupt destination when using lowest
381 * priority interrupt delivery mode.
382 *
383 * In particular there was a hyperthreading cpu observed to
384 * deliver interrupts to the wrong hyperthread when only one
385	 * hyperthread was specified in the interrupt destination.
386 */
387 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
388}
389
390#ifdef CONFIG_X86_SUMMIT_NUMA
35static struct rio_table_hdr *rio_table_hdr __initdata; 391static struct rio_table_hdr *rio_table_hdr __initdata;
36static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; 392static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata;
37static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata; 393static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata;
@@ -186,3 +542,61 @@ void __init setup_summit(void)
186 next_wpeg = 0; 542 next_wpeg = 0;
187 } while (next_wpeg != 0); 543 } while (next_wpeg != 0);
188} 544}
545#endif
546
547struct genapic apic_summit = {
548
549 .name = "summit",
550 .probe = probe_summit,
551 .acpi_madt_oem_check = summit_acpi_madt_oem_check,
552 .apic_id_registered = summit_apic_id_registered,
553
554 .irq_delivery_mode = dest_LowestPrio,
555 /* logical delivery broadcast to all CPUs: */
556 .irq_dest_mode = 1,
557
558 .target_cpus = summit_target_cpus,
559 .disable_esr = 1,
560 .dest_logical = APIC_DEST_LOGICAL,
561 .check_apicid_used = summit_check_apicid_used,
562 .check_apicid_present = summit_check_apicid_present,
563
564 .vector_allocation_domain = summit_vector_allocation_domain,
565 .init_apic_ldr = summit_init_apic_ldr,
566
567 .ioapic_phys_id_map = summit_ioapic_phys_id_map,
568 .setup_apic_routing = summit_setup_apic_routing,
569 .multi_timer_check = NULL,
570 .apicid_to_node = summit_apicid_to_node,
571 .cpu_to_logical_apicid = summit_cpu_to_logical_apicid,
572 .cpu_present_to_apicid = summit_cpu_present_to_apicid,
573 .apicid_to_cpu_present = summit_apicid_to_cpu_present,
574 .setup_portio_remap = NULL,
575 .check_phys_apicid_present = summit_check_phys_apicid_present,
576 .enable_apic_mode = NULL,
577 .phys_pkg_id = summit_phys_pkg_id,
578 .mps_oem_check = summit_mps_oem_check,
579
580 .get_apic_id = summit_get_apic_id,
581 .set_apic_id = NULL,
582 .apic_id_mask = 0xFF << 24,
583
584 .cpu_mask_to_apicid = summit_cpu_mask_to_apicid,
585 .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and,
586
587 .send_IPI_mask = summit_send_IPI_mask,
588 .send_IPI_mask_allbutself = NULL,
589 .send_IPI_allbutself = summit_send_IPI_allbutself,
590 .send_IPI_all = summit_send_IPI_all,
591 .send_IPI_self = default_send_IPI_self,
592
593 .wakeup_cpu = NULL,
594 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
595 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
596
597 .wait_for_init_deassert = default_wait_for_init_deassert,
598
599 .smp_callin_clear_local_apic = NULL,
600 .store_NMI_vector = NULL,
601 .inquire_remote_apic = default_inquire_remote_apic,
602};
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 3985cac0ed47..764c74e871f2 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -38,7 +38,7 @@
38#include <asm/time.h> 38#include <asm/time.h>
39#include <asm/timer.h> 39#include <asm/timer.h>
40 40
41#include "do_timer.h" 41#include <asm/do_timer.h>
42 42
43int timer_ack; 43int timer_ack;
44 44
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
deleted file mode 100644
index ce5054642247..000000000000
--- a/arch/x86/kernel/tlb_32.c
+++ /dev/null
@@ -1,256 +0,0 @@
1#include <linux/spinlock.h>
2#include <linux/cpu.h>
3#include <linux/interrupt.h>
4
5#include <asm/tlbflush.h>
6
7DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
8 ____cacheline_aligned = { &init_mm, 0, };
9
10/* must come after the send_IPI functions above for inlining */
11#include <mach_ipi.h>
12
13/*
14 * Smarter SMP flushing macros.
15 * c/o Linus Torvalds.
16 *
17 * These mean you can really definitely utterly forget about
18 * writing to user space from interrupts. (It's not allowed anyway).
19 *
20 * Optimizations Manfred Spraul <manfred@colorfullife.com>
21 */
22
23static cpumask_t flush_cpumask;
24static struct mm_struct *flush_mm;
25static unsigned long flush_va;
26static DEFINE_SPINLOCK(tlbstate_lock);
27
28/*
29 * We cannot call mmdrop() because we are in interrupt context,
30 * instead update mm->cpu_vm_mask.
31 *
32 * We need to reload %cr3 since the page tables may be going
33 * away from under us..
34 */
35void leave_mm(int cpu)
36{
37 BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK);
38 cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask);
39 load_cr3(swapper_pg_dir);
40}
41EXPORT_SYMBOL_GPL(leave_mm);
42
43/*
44 *
45 * The flush IPI assumes that a thread switch happens in this order:
46 * [cpu0: the cpu that switches]
47 * 1) switch_mm() either 1a) or 1b)
48 * 1a) thread switch to a different mm
49 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
50 * Stop ipi delivery for the old mm. This is not synchronized with
51 * the other cpus, but smp_invalidate_interrupt ignores flush ipis
52 * for the wrong mm, and in the worst case we perform a superfluous
53 * tlb flush.
54 * 1a2) set cpu_tlbstate to TLBSTATE_OK
55 * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
56 * was in lazy tlb mode.
57 * 1a3) update cpu_tlbstate[].active_mm
58 * Now cpu0 accepts tlb flushes for the new mm.
59 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
60 * Now the other cpus will send tlb flush ipis.
61 * 1a5) change cr3.
62 * 1b) thread switch without mm change
63 * cpu_tlbstate[].active_mm is correct, cpu0 already handles
64 * flush ipis.
65 * 1b1) set cpu_tlbstate to TLBSTATE_OK
66 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
67 * Atomically set the bit [other cpus will start sending flush ipis],
68 * and test the bit.
69 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
70 * 2) switch %%esp, ie current
71 *
72 * The interrupt must handle 2 special cases:
73 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
74 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
75 * runs in kernel space, the cpu could load tlb entries for user space
76 * pages.
77 *
78 * The good news is that cpu_tlbstate is local to each cpu, no
79 * write/read ordering problems.
80 */
81
82/*
83 * TLB flush IPI:
84 *
85 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
86 * 2) Leave the mm if we are in the lazy tlb mode.
87 */
88
89void smp_invalidate_interrupt(struct pt_regs *regs)
90{
91 unsigned long cpu;
92
93 cpu = get_cpu();
94
95 if (!cpu_isset(cpu, flush_cpumask))
96 goto out;
97 /*
98 * This was a BUG() but until someone can quote me the
99 * line from the intel manual that guarantees an IPI to
100 * multiple CPUs is retried _only_ on the erroring CPUs
101 * it's staying as a return
102 *
103 * BUG();
104 */
105
106 if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
107 if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
108 if (flush_va == TLB_FLUSH_ALL)
109 local_flush_tlb();
110 else
111 __flush_tlb_one(flush_va);
112 } else
113 leave_mm(cpu);
114 }
115 ack_APIC_irq();
116 smp_mb__before_clear_bit();
117 cpu_clear(cpu, flush_cpumask);
118 smp_mb__after_clear_bit();
119out:
120 put_cpu_no_resched();
121 inc_irq_stat(irq_tlb_count);
122}
123
124void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
125 unsigned long va)
126{
127 cpumask_t cpumask = *cpumaskp;
128
129 /*
130 * A couple of (to be removed) sanity checks:
131 *
132 * - current CPU must not be in mask
133 * - mask must exist :)
134 */
135 BUG_ON(cpus_empty(cpumask));
136 BUG_ON(cpu_isset(smp_processor_id(), cpumask));
137 BUG_ON(!mm);
138
139#ifdef CONFIG_HOTPLUG_CPU
140 /* If a CPU which we ran on has gone down, OK. */
141 cpus_and(cpumask, cpumask, cpu_online_map);
142 if (unlikely(cpus_empty(cpumask)))
143 return;
144#endif
145
146 /*
147 * I'm not happy about this global shared spinlock in the
148 * MM hot path, but we'll see how contended it is.
149 * AK: x86-64 has a faster method that could be ported.
150 */
151 spin_lock(&tlbstate_lock);
152
153 flush_mm = mm;
154 flush_va = va;
155 cpus_or(flush_cpumask, cpumask, flush_cpumask);
156
157 /*
158 * Make the above memory operations globally visible before
159 * sending the IPI.
160 */
161 smp_mb();
162 /*
163 * We have to send the IPI only to
164 * CPUs affected.
165 */
166 send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);
167
168 while (!cpus_empty(flush_cpumask))
169 /* nothing. lockup detection does not belong here */
170 cpu_relax();
171
172 flush_mm = NULL;
173 flush_va = 0;
174 spin_unlock(&tlbstate_lock);
175}
176
177void flush_tlb_current_task(void)
178{
179 struct mm_struct *mm = current->mm;
180 cpumask_t cpu_mask;
181
182 preempt_disable();
183 cpu_mask = mm->cpu_vm_mask;
184 cpu_clear(smp_processor_id(), cpu_mask);
185
186 local_flush_tlb();
187 if (!cpus_empty(cpu_mask))
188 flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
189 preempt_enable();
190}
191
192void flush_tlb_mm(struct mm_struct *mm)
193{
194 cpumask_t cpu_mask;
195
196 preempt_disable();
197 cpu_mask = mm->cpu_vm_mask;
198 cpu_clear(smp_processor_id(), cpu_mask);
199
200 if (current->active_mm == mm) {
201 if (current->mm)
202 local_flush_tlb();
203 else
204 leave_mm(smp_processor_id());
205 }
206 if (!cpus_empty(cpu_mask))
207 flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
208
209 preempt_enable();
210}
211
212void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
213{
214 struct mm_struct *mm = vma->vm_mm;
215 cpumask_t cpu_mask;
216
217 preempt_disable();
218 cpu_mask = mm->cpu_vm_mask;
219 cpu_clear(smp_processor_id(), cpu_mask);
220
221 if (current->active_mm == mm) {
222 if (current->mm)
223 __flush_tlb_one(va);
224 else
225 leave_mm(smp_processor_id());
226 }
227
228 if (!cpus_empty(cpu_mask))
229 flush_tlb_others(cpu_mask, mm, va);
230
231 preempt_enable();
232}
233EXPORT_SYMBOL(flush_tlb_page);
234
235static void do_flush_tlb_all(void *info)
236{
237 unsigned long cpu = smp_processor_id();
238
239 __flush_tlb_all();
240 if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY)
241 leave_mm(cpu);
242}
243
244void flush_tlb_all(void)
245{
246 on_each_cpu(do_flush_tlb_all, NULL, 1);
247}
248
249void reset_lazy_tlbstate(void)
250{
251 int cpu = raw_smp_processor_id();
252
253 per_cpu(cpu_tlbstate, cpu).state = 0;
254 per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
255}
256
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 6812b829ed83..f396e61bcb34 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -11,6 +11,7 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12 12
13#include <asm/mmu_context.h> 13#include <asm/mmu_context.h>
14#include <asm/uv/uv.h>
14#include <asm/uv/uv_mmrs.h> 15#include <asm/uv/uv_mmrs.h>
15#include <asm/uv/uv_hub.h> 16#include <asm/uv/uv_hub.h>
16#include <asm/uv/uv_bau.h> 17#include <asm/uv/uv_bau.h>
@@ -19,7 +20,7 @@
19#include <asm/tsc.h> 20#include <asm/tsc.h>
20#include <asm/irq_vectors.h> 21#include <asm/irq_vectors.h>
21 22
22#include <mach_apic.h> 23#include <asm/genapic.h>
23 24
24static struct bau_control **uv_bau_table_bases __read_mostly; 25static struct bau_control **uv_bau_table_bases __read_mostly;
25static int uv_bau_retry_limit __read_mostly; 26static int uv_bau_retry_limit __read_mostly;
@@ -210,14 +211,15 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
210 * 211 *
211 * Send a broadcast and wait for a broadcast message to complete. 212 * Send a broadcast and wait for a broadcast message to complete.
212 * 213 *
213 * The cpumaskp mask contains the cpus the broadcast was sent to. 214 * The flush_mask contains the cpus the broadcast was sent to.
214 * 215 *
215 * Returns 1 if all remote flushing was done. The mask is zeroed. 216 * Returns NULL if all remote flushing was done. The mask is zeroed.
216 * Returns 0 if some remote flushing remains to be done. The mask is left 217 * Returns @flush_mask if some remote flushing remains to be done. The
217 * unchanged. 218 * mask will have some bits still set.
218 */ 219 */
219int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc, 220const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
220 cpumask_t *cpumaskp) 221 struct bau_desc *bau_desc,
222 struct cpumask *flush_mask)
221{ 223{
222 int completion_status = 0; 224 int completion_status = 0;
223 int right_shift; 225 int right_shift;
@@ -257,66 +259,76 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
257 * the cpu's, all of which are still in the mask. 259 * the cpu's, all of which are still in the mask.
258 */ 260 */
259 __get_cpu_var(ptcstats).ptc_i++; 261 __get_cpu_var(ptcstats).ptc_i++;
260 return 0; 262 return flush_mask;
261 } 263 }
262 264
263 /* 265 /*
264 * Success, so clear the remote cpu's from the mask so we don't 266 * Success, so clear the remote cpu's from the mask so we don't
265 * use the IPI method of shootdown on them. 267 * use the IPI method of shootdown on them.
266 */ 268 */
267 for_each_cpu_mask(bit, *cpumaskp) { 269 for_each_cpu(bit, flush_mask) {
268 blade = uv_cpu_to_blade_id(bit); 270 blade = uv_cpu_to_blade_id(bit);
269 if (blade == this_blade) 271 if (blade == this_blade)
270 continue; 272 continue;
271 cpu_clear(bit, *cpumaskp); 273 cpumask_clear_cpu(bit, flush_mask);
272 } 274 }
273 if (!cpus_empty(*cpumaskp)) 275 if (!cpumask_empty(flush_mask))
274 return 0; 276 return flush_mask;
275 return 1; 277 return NULL;
276} 278}
277 279
278/** 280/**
279 * uv_flush_tlb_others - globally purge translation cache of a virtual 281 * uv_flush_tlb_others - globally purge translation cache of a virtual
280 * address or all TLB's 282 * address or all TLB's
281 * @cpumaskp: mask of all cpu's in which the address is to be removed 283 * @cpumask: mask of all cpu's in which the address is to be removed
282 * @mm: mm_struct containing virtual address range 284 * @mm: mm_struct containing virtual address range
283 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu) 285 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
286 * @cpu: the current cpu
284 * 287 *
285 * This is the entry point for initiating any UV global TLB shootdown. 288 * This is the entry point for initiating any UV global TLB shootdown.
286 * 289 *
287 * Purges the translation caches of all specified processors of the given 290 * Purges the translation caches of all specified processors of the given
288 * virtual address, or purges all TLB's on specified processors. 291 * virtual address, or purges all TLB's on specified processors.
289 * 292 *
290 * The caller has derived the cpumaskp from the mm_struct and has subtracted 293 * The caller has derived the cpumask from the mm_struct. This function
291 * the local cpu from the mask. This function is called only if there 294 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
292 * are bits set in the mask. (e.g. flush_tlb_page())
293 * 295 *
294 * The cpumaskp is converted into a nodemask of the nodes containing 296 * The cpumask is converted into a nodemask of the nodes containing
295 * the cpus. 297 * the cpus.
296 * 298 *
297 * Returns 1 if all remote flushing was done. 299 * Note that this function should be called with preemption disabled.
298 * Returns 0 if some remote flushing remains to be done. 300 *
301 * Returns NULL if all remote flushing was done.
302 * Returns pointer to cpumask if some remote flushing remains to be
303 * done. The returned pointer is valid till preemption is re-enabled.
299 */ 304 */
300int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm, 305const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
301 unsigned long va) 306 struct mm_struct *mm,
307 unsigned long va, unsigned int cpu)
302{ 308{
309 static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
310 struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask);
303 int i; 311 int i;
304 int bit; 312 int bit;
305 int blade; 313 int blade;
306 int cpu; 314 int uv_cpu;
307 int this_blade; 315 int this_blade;
308 int locals = 0; 316 int locals = 0;
309 struct bau_desc *bau_desc; 317 struct bau_desc *bau_desc;
310 318
311 cpu = uv_blade_processor_id(); 319 WARN_ON(!in_atomic());
320
321 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
322
323 uv_cpu = uv_blade_processor_id();
312 this_blade = uv_numa_blade_id(); 324 this_blade = uv_numa_blade_id();
313 bau_desc = __get_cpu_var(bau_control).descriptor_base; 325 bau_desc = __get_cpu_var(bau_control).descriptor_base;
314 bau_desc += UV_ITEMS_PER_DESCRIPTOR * cpu; 326 bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
315 327
316 bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); 328 bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
317 329
318 i = 0; 330 i = 0;
319 for_each_cpu_mask(bit, *cpumaskp) { 331 for_each_cpu(bit, flush_mask) {
320 blade = uv_cpu_to_blade_id(bit); 332 blade = uv_cpu_to_blade_id(bit);
321 BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1)); 333 BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
322 if (blade == this_blade) { 334 if (blade == this_blade) {
@@ -331,17 +343,17 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
331 * no off_node flushing; return status for local node 343 * no off_node flushing; return status for local node
332 */ 344 */
333 if (locals) 345 if (locals)
334 return 0; 346 return flush_mask;
335 else 347 else
336 return 1; 348 return NULL;
337 } 349 }
338 __get_cpu_var(ptcstats).requestor++; 350 __get_cpu_var(ptcstats).requestor++;
339 __get_cpu_var(ptcstats).ntargeted += i; 351 __get_cpu_var(ptcstats).ntargeted += i;
340 352
341 bau_desc->payload.address = va; 353 bau_desc->payload.address = va;
342 bau_desc->payload.sending_cpu = smp_processor_id(); 354 bau_desc->payload.sending_cpu = cpu;
343 355
344 return uv_flush_send_and_wait(cpu, this_blade, bau_desc, cpumaskp); 356 return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
345} 357}
346 358
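Given the new return convention documented above (NULL when the BAU handled all remote flushing, otherwise a mask of CPUs still needing a flush), a caller would presumably look something like the sketch below; the wrapper and the IPI fallback helper are illustrative names, not functions introduced by this patch:

/* Illustrative caller only -- kernel headers and an IPI-based fallback are assumed. */
static void example_flush_others(const struct cpumask *cpumask, struct mm_struct *mm,
				 unsigned long va, unsigned int cpu)
{
	const struct cpumask *remaining;

	remaining = uv_flush_tlb_others(cpumask, mm, va, cpu);
	if (remaining)				/* some CPUs were not reached via the BAU */
		example_flush_with_ipi(remaining, mm, va);	/* hypothetical fallback */
}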
347/* 359/*
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 7932338d7cb3..0d032d2d8a18 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -54,12 +54,11 @@
54#include <asm/desc.h> 54#include <asm/desc.h>
55#include <asm/i387.h> 55#include <asm/i387.h>
56 56
57#include <mach_traps.h> 57#include <asm/mach_traps.h>
58 58
59#ifdef CONFIG_X86_64 59#ifdef CONFIG_X86_64
60#include <asm/pgalloc.h> 60#include <asm/pgalloc.h>
61#include <asm/proto.h> 61#include <asm/proto.h>
62#include <asm/pda.h>
63#else 62#else
64#include <asm/processor-flags.h> 63#include <asm/processor-flags.h>
65#include <asm/arch_hooks.h> 64#include <asm/arch_hooks.h>
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 599e58168631..83d53ce5d4c4 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -773,7 +773,7 @@ __cpuinit int unsynchronized_tsc(void)
773 if (!cpu_has_tsc || tsc_unstable) 773 if (!cpu_has_tsc || tsc_unstable)
774 return 1; 774 return 1;
775 775
776#ifdef CONFIG_X86_SMP 776#ifdef CONFIG_SMP
777 if (apic_is_clustered_box()) 777 if (apic_is_clustered_box())
778 return 1; 778 return 1;
779#endif 779#endif
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index d801d06af068..4fd646e6dd43 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -32,9 +32,9 @@
32#include <asm/e820.h> 32#include <asm/e820.h>
33#include <asm/io.h> 33#include <asm/io.h>
34 34
35#include <mach_ipi.h> 35#include <asm/genapic.h>
36 36
37#include "mach_apic.h" 37#include <asm/genapic.h>
38 38
39#include <linux/kernel_stat.h> 39#include <linux/kernel_stat.h>
40 40
@@ -200,7 +200,7 @@ static void __init MP_processor_info(struct mpc_cpu *m)
200 return; 200 return;
201 } 201 }
202 202
203 apic_cpus = apicid_to_cpu_present(m->apicid); 203 apic_cpus = apic->apicid_to_cpu_present(m->apicid);
204 physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus); 204 physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus);
205 /* 205 /*
206 * Validate version 206 * Validate version
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index bef58b4982db..f052c84ecbe4 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -680,10 +680,11 @@ static inline int __init activate_vmi(void)
680 para_fill(pv_mmu_ops.write_cr2, SetCR2); 680 para_fill(pv_mmu_ops.write_cr2, SetCR2);
681 para_fill(pv_mmu_ops.write_cr3, SetCR3); 681 para_fill(pv_mmu_ops.write_cr3, SetCR3);
682 para_fill(pv_cpu_ops.write_cr4, SetCR4); 682 para_fill(pv_cpu_ops.write_cr4, SetCR4);
683 para_fill(pv_irq_ops.save_fl, GetInterruptMask); 683
684 para_fill(pv_irq_ops.restore_fl, SetInterruptMask); 684 para_fill(pv_irq_ops.save_fl.func, GetInterruptMask);
685 para_fill(pv_irq_ops.irq_disable, DisableInterrupts); 685 para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask);
686 para_fill(pv_irq_ops.irq_enable, EnableInterrupts); 686 para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts);
687 para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts);
687 688
688 para_fill(pv_cpu_ops.wbinvd, WBINVD); 689 para_fill(pv_cpu_ops.wbinvd, WBINVD);
689 para_fill(pv_cpu_ops.read_tsc, RDTSC); 690 para_fill(pv_cpu_ops.read_tsc, RDTSC);
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index c4c1f9e09402..a4791ef412d1 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -256,7 +256,7 @@ void __devinit vmi_time_bsp_init(void)
256 */ 256 */
257 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); 257 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
258 local_irq_disable(); 258 local_irq_disable();
259#ifdef CONFIG_X86_SMP 259#ifdef CONFIG_SMP
260 /* 260 /*
261 * XXX handle_percpu_irq only defined for SMP; we need to switch over 261 * XXX handle_percpu_irq only defined for SMP; we need to switch over
262 * to using it, since this is a local interrupt, which each CPU must 262 * to using it, since this is a local interrupt, which each CPU must
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index 82c67559dde7..3eba7f7bac05 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -178,14 +178,7 @@ SECTIONS
178 __initramfs_end = .; 178 __initramfs_end = .;
179 } 179 }
180#endif 180#endif
181 . = ALIGN(PAGE_SIZE); 181 PERCPU(PAGE_SIZE)
182 .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
183 __per_cpu_start = .;
184 *(.data.percpu.page_aligned)
185 *(.data.percpu)
186 *(.data.percpu.shared_aligned)
187 __per_cpu_end = .;
188 }
189 . = ALIGN(PAGE_SIZE); 182 . = ALIGN(PAGE_SIZE);
190 /* freed after init ends here */ 183 /* freed after init ends here */
191 184
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index 1a614c0e6bef..07f62d287ff0 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -5,6 +5,7 @@
5#define LOAD_OFFSET __START_KERNEL_map 5#define LOAD_OFFSET __START_KERNEL_map
6 6
7#include <asm-generic/vmlinux.lds.h> 7#include <asm-generic/vmlinux.lds.h>
8#include <asm/asm-offsets.h>
8#include <asm/page.h> 9#include <asm/page.h>
9 10
10#undef i386 /* in case the preprocessor is a 32bit one */ 11#undef i386 /* in case the preprocessor is a 32bit one */
@@ -13,12 +14,15 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
13OUTPUT_ARCH(i386:x86-64) 14OUTPUT_ARCH(i386:x86-64)
14ENTRY(phys_startup_64) 15ENTRY(phys_startup_64)
15jiffies_64 = jiffies; 16jiffies_64 = jiffies;
16_proxy_pda = 1;
17PHDRS { 17PHDRS {
18 text PT_LOAD FLAGS(5); /* R_E */ 18 text PT_LOAD FLAGS(5); /* R_E */
19 data PT_LOAD FLAGS(7); /* RWE */ 19 data PT_LOAD FLAGS(7); /* RWE */
20 user PT_LOAD FLAGS(7); /* RWE */ 20 user PT_LOAD FLAGS(7); /* RWE */
21 data.init PT_LOAD FLAGS(7); /* RWE */ 21 data.init PT_LOAD FLAGS(7); /* RWE */
22#ifdef CONFIG_SMP
23 percpu PT_LOAD FLAGS(7); /* RWE */
24#endif
25 data.init2 PT_LOAD FLAGS(7); /* RWE */
22 note PT_NOTE FLAGS(0); /* ___ */ 26 note PT_NOTE FLAGS(0); /* ___ */
23} 27}
24SECTIONS 28SECTIONS
@@ -208,14 +212,28 @@ SECTIONS
208 __initramfs_end = .; 212 __initramfs_end = .;
209#endif 213#endif
210 214
215#ifdef CONFIG_SMP
216 /*
217 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
218 * output PHDR, so the next output section - __data_nosave - should
219 * start another section data.init2. Also, pda should be at the head of
220 * percpu area. Preallocate it and define the percpu offset symbol
221 * so that it can be accessed as a percpu variable.
222 */
223 . = ALIGN(PAGE_SIZE);
224 PERCPU_VADDR(0, :percpu)
225#else
211 PERCPU(PAGE_SIZE) 226 PERCPU(PAGE_SIZE)
227#endif
212 228
213 . = ALIGN(PAGE_SIZE); 229 . = ALIGN(PAGE_SIZE);
214 __init_end = .; 230 __init_end = .;
215 231
216 . = ALIGN(PAGE_SIZE); 232 . = ALIGN(PAGE_SIZE);
217 __nosave_begin = .; 233 __nosave_begin = .;
218 .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) } 234 .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
235 *(.data.nosave)
236 } :data.init2 /* use another section data.init2, see PERCPU_VADDR() above */
219 . = ALIGN(PAGE_SIZE); 237 . = ALIGN(PAGE_SIZE);
220 __nosave_end = .; 238 __nosave_end = .;
221 239
@@ -244,3 +262,8 @@ SECTIONS
244 */ 262 */
245ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), 263ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
246 "kernel image bigger than KERNEL_IMAGE_SIZE") 264 "kernel image bigger than KERNEL_IMAGE_SIZE")
265
266#ifdef CONFIG_SMP
267ASSERT((per_cpu__irq_stack_union == 0),
268 "irq_stack_union is not at start of per-cpu area");
269#endif
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index a688f3bfaec2..c609205df594 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -37,6 +37,7 @@ static unsigned long vsmp_save_fl(void)
37 flags &= ~X86_EFLAGS_IF; 37 flags &= ~X86_EFLAGS_IF;
38 return flags; 38 return flags;
39} 39}
40PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
40 41
41static void vsmp_restore_fl(unsigned long flags) 42static void vsmp_restore_fl(unsigned long flags)
42{ 43{
@@ -46,6 +47,7 @@ static void vsmp_restore_fl(unsigned long flags)
46 flags |= X86_EFLAGS_AC; 47 flags |= X86_EFLAGS_AC;
47 native_restore_fl(flags); 48 native_restore_fl(flags);
48} 49}
50PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
49 51
50static void vsmp_irq_disable(void) 52static void vsmp_irq_disable(void)
51{ 53{
@@ -53,6 +55,7 @@ static void vsmp_irq_disable(void)
53 55
54 native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC); 56 native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
55} 57}
58PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
56 59
57static void vsmp_irq_enable(void) 60static void vsmp_irq_enable(void)
58{ 61{
@@ -60,6 +63,7 @@ static void vsmp_irq_enable(void)
60 63
61 native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); 64 native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
62} 65}
66PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
63 67
64static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf, 68static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
65 unsigned long addr, unsigned len) 69 unsigned long addr, unsigned len)
@@ -90,10 +94,10 @@ static void __init set_vsmp_pv_ops(void)
90 cap, ctl); 94 cap, ctl);
91 if (cap & ctl & (1 << 4)) { 95 if (cap & ctl & (1 << 4)) {
92 /* Setup irq ops and turn on vSMP IRQ fastpath handling */ 96 /* Setup irq ops and turn on vSMP IRQ fastpath handling */
93 pv_irq_ops.irq_disable = vsmp_irq_disable; 97 pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
94 pv_irq_ops.irq_enable = vsmp_irq_enable; 98 pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
95 pv_irq_ops.save_fl = vsmp_save_fl; 99 pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
96 pv_irq_ops.restore_fl = vsmp_restore_fl; 100 pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
97 pv_init_ops.patch = vsmp_patch; 101 pv_init_ops.patch = vsmp_patch;
98 102
99 ctl &= ~(1 << 4); 103 ctl &= ~(1 << 4);
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 695e426aa354..3909e3ba5ce3 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -58,5 +58,3 @@ EXPORT_SYMBOL(__memcpy);
58EXPORT_SYMBOL(empty_zero_page); 58EXPORT_SYMBOL(empty_zero_page);
59EXPORT_SYMBOL(init_level4_pgt); 59EXPORT_SYMBOL(init_level4_pgt);
60EXPORT_SYMBOL(load_gs_index); 60EXPORT_SYMBOL(load_gs_index);
61
62EXPORT_SYMBOL(_proxy_pda);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 92f1c6f3e19d..19e33b6cd593 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -173,24 +173,29 @@ static unsigned long save_fl(void)
173{ 173{
174 return lguest_data.irq_enabled; 174 return lguest_data.irq_enabled;
175} 175}
176PV_CALLEE_SAVE_REGS_THUNK(save_fl);
176 177
177/* restore_flags() just sets the flags back to the value given. */ 178/* restore_flags() just sets the flags back to the value given. */
178static void restore_fl(unsigned long flags) 179static void restore_fl(unsigned long flags)
179{ 180{
180 lguest_data.irq_enabled = flags; 181 lguest_data.irq_enabled = flags;
181} 182}
183PV_CALLEE_SAVE_REGS_THUNK(restore_fl);
182 184
183/* Interrupts go off... */ 185/* Interrupts go off... */
184static void irq_disable(void) 186static void irq_disable(void)
185{ 187{
186 lguest_data.irq_enabled = 0; 188 lguest_data.irq_enabled = 0;
187} 189}
190PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
188 191
189/* Interrupts go on... */ 192/* Interrupts go on... */
190static void irq_enable(void) 193static void irq_enable(void)
191{ 194{
192 lguest_data.irq_enabled = X86_EFLAGS_IF; 195 lguest_data.irq_enabled = X86_EFLAGS_IF;
193} 196}
197PV_CALLEE_SAVE_REGS_THUNK(irq_enable);
198
194/*:*/ 199/*:*/
195/*M:003 Note that we don't check for outstanding interrupts when we re-enable 200/*M:003 Note that we don't check for outstanding interrupts when we re-enable
196 * them (or when we unmask an interrupt). This seems to work for the moment, 201 * them (or when we unmask an interrupt). This seems to work for the moment,
@@ -984,10 +989,10 @@ __init void lguest_init(void)
984 989
985 /* interrupt-related operations */ 990 /* interrupt-related operations */
986 pv_irq_ops.init_IRQ = lguest_init_IRQ; 991 pv_irq_ops.init_IRQ = lguest_init_IRQ;
987 pv_irq_ops.save_fl = save_fl; 992 pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
988 pv_irq_ops.restore_fl = restore_fl; 993 pv_irq_ops.restore_fl = PV_CALLEE_SAVE(restore_fl);
989 pv_irq_ops.irq_disable = irq_disable; 994 pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
990 pv_irq_ops.irq_enable = irq_enable; 995 pv_irq_ops.irq_enable = PV_CALLEE_SAVE(irq_enable);
991 pv_irq_ops.safe_halt = lguest_safe_halt; 996 pv_irq_ops.safe_halt = lguest_safe_halt;
992 997
993 /* init-time operations */ 998 /* init-time operations */
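The vsmp and lguest hunks above follow one conversion pattern: each irq-flags helper keeps its plain C body, gains a PV_CALLEE_SAVE_REGS_THUNK() companion, and is installed through PV_CALLEE_SAVE() instead of a bare function pointer. A condensed sketch of the pattern, with made-up names and state:

/* Illustrative pattern only -- mirrors the hunks above, not code from the patch. */
static unsigned long my_flags;			/* stand-in for the backend's flag word */

static unsigned long my_save_fl(void)
{
	return my_flags;			/* C body is unchanged by the conversion */
}
PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);		/* emits the register-preserving thunk */

static void __init my_setup_pv_ops(void)
{
	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);	/* wrap for the new slot type */
}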
diff --git a/arch/x86/mach-default/Makefile b/arch/x86/mach-default/Makefile
deleted file mode 100644
index 012fe34459e6..000000000000
--- a/arch/x86/mach-default/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1#
2# Makefile for the linux kernel.
3#
4
5obj-y := setup.o
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
deleted file mode 100644
index a265a7c63190..000000000000
--- a/arch/x86/mach-default/setup.c
+++ /dev/null
@@ -1,174 +0,0 @@
1/*
2 * Machine specific setup for generic
3 */
4
5#include <linux/smp.h>
6#include <linux/init.h>
7#include <linux/interrupt.h>
8#include <asm/acpi.h>
9#include <asm/arch_hooks.h>
10#include <asm/e820.h>
11#include <asm/setup.h>
12
13#include <mach_ipi.h>
14
15#ifdef CONFIG_HOTPLUG_CPU
16#define DEFAULT_SEND_IPI (1)
17#else
18#define DEFAULT_SEND_IPI (0)
19#endif
20
21int no_broadcast = DEFAULT_SEND_IPI;
22
23/**
24 * pre_intr_init_hook - initialisation prior to setting up interrupt vectors
25 *
26 * Description:
27 * Perform any necessary interrupt initialisation prior to setting up
28 * the "ordinary" interrupt call gates. For legacy reasons, the ISA
29 * interrupts should be initialised here if the machine emulates a PC
30 * in any way.
31 **/
32void __init pre_intr_init_hook(void)
33{
34 if (x86_quirks->arch_pre_intr_init) {
35 if (x86_quirks->arch_pre_intr_init())
36 return;
37 }
38 init_ISA_irqs();
39}
40
41/*
42 * IRQ2 is cascade interrupt to second interrupt controller
43 */
44static struct irqaction irq2 = {
45 .handler = no_action,
46 .mask = CPU_MASK_NONE,
47 .name = "cascade",
48};
49
50/**
51 * intr_init_hook - post gate setup interrupt initialisation
52 *
53 * Description:
54 * Fill in any interrupts that may have been left out by the general
55 * init_IRQ() routine. interrupts having to do with the machine rather
56 * than the devices on the I/O bus (like APIC interrupts in intel MP
57 * systems) are started here.
58 **/
59void __init intr_init_hook(void)
60{
61 if (x86_quirks->arch_intr_init) {
62 if (x86_quirks->arch_intr_init())
63 return;
64 }
65 if (!acpi_ioapic)
66 setup_irq(2, &irq2);
67
68}
69
70/**
71 * pre_setup_arch_hook - hook called prior to any setup_arch() execution
72 *
73 * Description:
74 * generally used to activate any machine specific identification
75 * routines that may be needed before setup_arch() runs. On Voyager
76 * this is used to get the board revision and type.
77 **/
78void __init pre_setup_arch_hook(void)
79{
80}
81
82/**
83 * trap_init_hook - initialise system specific traps
84 *
85 * Description:
86 * Called as the final act of trap_init(). Used in VISWS to initialise
87 * the various board specific APIC traps.
88 **/
89void __init trap_init_hook(void)
90{
91 if (x86_quirks->arch_trap_init) {
92 if (x86_quirks->arch_trap_init())
93 return;
94 }
95}
96
97static struct irqaction irq0 = {
98 .handler = timer_interrupt,
99 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL,
100 .mask = CPU_MASK_NONE,
101 .name = "timer"
102};
103
104/**
105 * pre_time_init_hook - do any specific initialisations before.
106 *
107 **/
108void __init pre_time_init_hook(void)
109{
110 if (x86_quirks->arch_pre_time_init)
111 x86_quirks->arch_pre_time_init();
112}
113
114/**
115 * time_init_hook - do any specific initialisations for the system timer.
116 *
117 * Description:
118 * Must plug the system timer interrupt source at HZ into the IRQ listed
119 * in irq_vectors.h:TIMER_IRQ
120 **/
121void __init time_init_hook(void)
122{
123 if (x86_quirks->arch_time_init) {
124 /*
125 * A nonzero return code does not mean failure, it means
126 * that the architecture quirk does not want any
127 * generic (timer) setup to be performed after this:
128 */
129 if (x86_quirks->arch_time_init())
130 return;
131 }
132
133 irq0.mask = cpumask_of_cpu(0);
134 setup_irq(0, &irq0);
135}
136
137#ifdef CONFIG_MCA
138/**
139 * mca_nmi_hook - hook into MCA specific NMI chain
140 *
141 * Description:
142 * The MCA (Microchannel Architecture) has an NMI chain for NMI sources
143 * along the MCA bus. Use this to hook into that chain if you will need
144 * it.
145 **/
146void mca_nmi_hook(void)
147{
148 /*
149 * If I recall correctly, there's a whole bunch of other things that
150 * we can do to check for NMI problems, but that's all I know about
151 * at the moment.
152 */
153 pr_warning("NMI generated from unknown source!\n");
154}
155#endif
156
157static __init int no_ipi_broadcast(char *str)
158{
159 get_option(&str, &no_broadcast);
160 pr_info("Using %s mode\n",
161 no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
162 return 1;
163}
164__setup("no_ipi_broadcast=", no_ipi_broadcast);
165
166static int __init print_ipi_mode(void)
167{
168 pr_info("Using IPI %s mode\n",
169 no_broadcast ? "No-Shortcut" : "Shortcut");
170 return 0;
171}
172
173late_initcall(print_ipi_mode);
174
diff --git a/arch/x86/mach-generic/Makefile b/arch/x86/mach-generic/Makefile
deleted file mode 100644
index 6730f4e7c744..000000000000
--- a/arch/x86/mach-generic/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
1#
2# Makefile for the generic architecture
3#
4
5EXTRA_CFLAGS := -Iarch/x86/kernel
6
7obj-y := probe.o default.o
8obj-$(CONFIG_X86_NUMAQ) += numaq.o
9obj-$(CONFIG_X86_SUMMIT) += summit.o
10obj-$(CONFIG_X86_BIGSMP) += bigsmp.o
11obj-$(CONFIG_X86_ES7000) += es7000.o
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c
deleted file mode 100644
index bc4c7840b2a8..000000000000
--- a/arch/x86/mach-generic/bigsmp.c
+++ /dev/null
@@ -1,60 +0,0 @@
1/*
2 * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs.
3 * Drives the local APIC in "clustered mode".
4 */
5#define APIC_DEFINITION 1
6#include <linux/threads.h>
7#include <linux/cpumask.h>
8#include <asm/mpspec.h>
9#include <asm/genapic.h>
10#include <asm/fixmap.h>
11#include <asm/apicdef.h>
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/dmi.h>
15#include <asm/bigsmp/apicdef.h>
16#include <linux/smp.h>
17#include <asm/bigsmp/apic.h>
18#include <asm/bigsmp/ipi.h>
19#include <asm/mach-default/mach_mpparse.h>
20#include <asm/mach-default/mach_wakecpu.h>
21
22static int dmi_bigsmp; /* can be set by dmi scanners */
23
24static int hp_ht_bigsmp(const struct dmi_system_id *d)
25{
26 printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
27 dmi_bigsmp = 1;
28 return 0;
29}
30
31
32static const struct dmi_system_id bigsmp_dmi_table[] = {
33 { hp_ht_bigsmp, "HP ProLiant DL760 G2",
34 { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
35 DMI_MATCH(DMI_BIOS_VERSION, "P44-"),}
36 },
37
38 { hp_ht_bigsmp, "HP ProLiant DL740",
39 { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
40 DMI_MATCH(DMI_BIOS_VERSION, "P47-"),}
41 },
42 { }
43};
44
45static void vector_allocation_domain(int cpu, cpumask_t *retmask)
46{
47 cpus_clear(*retmask);
48 cpu_set(cpu, *retmask);
49}
50
51static int probe_bigsmp(void)
52{
53 if (def_to_bigsmp)
54 dmi_bigsmp = 1;
55 else
56 dmi_check_system(bigsmp_dmi_table);
57 return dmi_bigsmp;
58}
59
60struct genapic apic_bigsmp = APIC_INIT("bigsmp", probe_bigsmp);
diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c
deleted file mode 100644
index e63a4a76d8cd..000000000000
--- a/arch/x86/mach-generic/default.c
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * Default generic APIC driver. This handles up to 8 CPUs.
3 */
4#define APIC_DEFINITION 1
5#include <linux/threads.h>
6#include <linux/cpumask.h>
7#include <asm/mpspec.h>
8#include <asm/mach-default/mach_apicdef.h>
9#include <asm/genapic.h>
10#include <asm/fixmap.h>
11#include <asm/apicdef.h>
12#include <linux/kernel.h>
13#include <linux/string.h>
14#include <linux/smp.h>
15#include <linux/init.h>
16#include <asm/mach-default/mach_apic.h>
17#include <asm/mach-default/mach_ipi.h>
18#include <asm/mach-default/mach_mpparse.h>
19#include <asm/mach-default/mach_wakecpu.h>
20
21/* should be called last. */
22static int probe_default(void)
23{
24 return 1;
25}
26
27struct genapic apic_default = APIC_INIT("default", probe_default);
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c
deleted file mode 100644
index c2ded1448024..000000000000
--- a/arch/x86/mach-generic/es7000.c
+++ /dev/null
@@ -1,103 +0,0 @@
1/*
2 * APIC driver for the Unisys ES7000 chipset.
3 */
4#define APIC_DEFINITION 1
5#include <linux/threads.h>
6#include <linux/cpumask.h>
7#include <asm/mpspec.h>
8#include <asm/genapic.h>
9#include <asm/fixmap.h>
10#include <asm/apicdef.h>
11#include <linux/kernel.h>
12#include <linux/string.h>
13#include <linux/init.h>
14#include <asm/es7000/apicdef.h>
15#include <linux/smp.h>
16#include <asm/es7000/apic.h>
17#include <asm/es7000/ipi.h>
18#include <asm/es7000/mpparse.h>
19#include <asm/mach-default/mach_wakecpu.h>
20
21void __init es7000_update_genapic_to_cluster(void)
22{
23 genapic->target_cpus = target_cpus_cluster;
24 genapic->int_delivery_mode = INT_DELIVERY_MODE_CLUSTER;
25 genapic->int_dest_mode = INT_DEST_MODE_CLUSTER;
26 genapic->no_balance_irq = NO_BALANCE_IRQ_CLUSTER;
27
28 genapic->init_apic_ldr = init_apic_ldr_cluster;
29
30 genapic->cpu_mask_to_apicid = cpu_mask_to_apicid_cluster;
31}
32
33static int probe_es7000(void)
34{
35 /* probed later in mptable/ACPI hooks */
36 return 0;
37}
38
39extern void es7000_sw_apic(void);
40static void __init enable_apic_mode(void)
41{
42 es7000_sw_apic();
43 return;
44}
45
46static __init int
47mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
48{
49 if (mpc->oemptr) {
50 struct mpc_oemtable *oem_table =
51 (struct mpc_oemtable *)mpc->oemptr;
52 if (!strncmp(oem, "UNISYS", 6))
53 return parse_unisys_oem((char *)oem_table);
54 }
55 return 0;
56}
57
58#ifdef CONFIG_ACPI
59/* Hook from generic ACPI tables.c */
60static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
61{
62 unsigned long oem_addr = 0;
63 int check_dsdt;
64 int ret = 0;
65
66	/* check the DSDT first to avoid clearing the fixmap for oem_addr */
67 check_dsdt = es7000_check_dsdt();
68
69 if (!find_unisys_acpi_oem_table(&oem_addr)) {
70 if (check_dsdt)
71 ret = parse_unisys_oem((char *)oem_addr);
72 else {
73 setup_unisys();
74 ret = 1;
75 }
76 /*
77 * we need to unmap it
78 */
79 unmap_unisys_acpi_oem_table(oem_addr);
80 }
81 return ret;
82}
83#else
84static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
85{
86 return 0;
87}
88#endif
89
90static void vector_allocation_domain(int cpu, cpumask_t *retmask)
91{
92 /* Careful. Some cpus do not strictly honor the set of cpus
93 * specified in the interrupt destination when using lowest
94 * priority interrupt delivery mode.
95 *
96 * In particular there was a hyperthreading cpu observed to
97 * deliver interrupts to the wrong hyperthread when only one
98	 * hyperthread was specified in the interrupt destination.
99 */
100 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
101}
102
103struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c
deleted file mode 100644
index 3679e2255645..000000000000
--- a/arch/x86/mach-generic/numaq.c
+++ /dev/null
@@ -1,53 +0,0 @@
1/*
2 * APIC driver for the IBM NUMAQ chipset.
3 */
4#define APIC_DEFINITION 1
5#include <linux/threads.h>
6#include <linux/cpumask.h>
7#include <asm/mpspec.h>
8#include <asm/genapic.h>
9#include <asm/fixmap.h>
10#include <asm/apicdef.h>
11#include <linux/kernel.h>
12#include <linux/string.h>
13#include <linux/init.h>
14#include <asm/numaq/apicdef.h>
15#include <linux/smp.h>
16#include <asm/numaq/apic.h>
17#include <asm/numaq/ipi.h>
18#include <asm/numaq/mpparse.h>
19#include <asm/numaq/wakecpu.h>
20#include <asm/numaq.h>
21
22static int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
23{
24 numaq_mps_oem_check(mpc, oem, productid);
25 return found_numaq;
26}
27
28static int probe_numaq(void)
29{
30 /* already know from get_memcfg_numaq() */
31 return found_numaq;
32}
33
34/* Hook from generic ACPI tables.c */
35static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
36{
37 return 0;
38}
39
40static void vector_allocation_domain(int cpu, cpumask_t *retmask)
41{
42 /* Careful. Some cpus do not strictly honor the set of cpus
43 * specified in the interrupt destination when using lowest
44 * priority interrupt delivery mode.
45 *
46 * In particular there was a hyperthreading cpu observed to
47 * deliver interrupts to the wrong hyperthread when only one
48	 * hyperthread was specified in the interrupt destination.
49 */
50 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
51}
52
53struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c
deleted file mode 100644
index 15a38daef1a8..000000000000
--- a/arch/x86/mach-generic/probe.c
+++ /dev/null
@@ -1,152 +0,0 @@
1/*
2 * Copyright 2003 Andi Kleen, SuSE Labs.
3 * Subject to the GNU Public License, v.2
4 *
5 * Generic x86 APIC driver probe layer.
6 */
7#include <linux/threads.h>
8#include <linux/cpumask.h>
9#include <linux/string.h>
10#include <linux/kernel.h>
11#include <linux/ctype.h>
12#include <linux/init.h>
13#include <linux/errno.h>
14#include <asm/fixmap.h>
15#include <asm/mpspec.h>
16#include <asm/apicdef.h>
17#include <asm/genapic.h>
18#include <asm/setup.h>
19
20extern struct genapic apic_numaq;
21extern struct genapic apic_summit;
22extern struct genapic apic_bigsmp;
23extern struct genapic apic_es7000;
24extern struct genapic apic_default;
25
26struct genapic *genapic = &apic_default;
27
28static struct genapic *apic_probe[] __initdata = {
29#ifdef CONFIG_X86_NUMAQ
30 &apic_numaq,
31#endif
32#ifdef CONFIG_X86_SUMMIT
33 &apic_summit,
34#endif
35#ifdef CONFIG_X86_BIGSMP
36 &apic_bigsmp,
37#endif
38#ifdef CONFIG_X86_ES7000
39 &apic_es7000,
40#endif
41 &apic_default, /* must be last */
42 NULL,
43};
44
45static int cmdline_apic __initdata;
46static int __init parse_apic(char *arg)
47{
48 int i;
49
50 if (!arg)
51 return -EINVAL;
52
53 for (i = 0; apic_probe[i]; i++) {
54 if (!strcmp(apic_probe[i]->name, arg)) {
55 genapic = apic_probe[i];
56 cmdline_apic = 1;
57 return 0;
58 }
59 }
60
61 if (x86_quirks->update_genapic)
62 x86_quirks->update_genapic();
63
64 /* Parsed again by __setup for debug/verbose */
65 return 0;
66}
67early_param("apic", parse_apic);
68
69void __init generic_bigsmp_probe(void)
70{
71#ifdef CONFIG_X86_BIGSMP
72 /*
73 * This routine is used to switch to bigsmp mode when
74 * - There is no apic= option specified by the user
75 * - generic_apic_probe() has chosen apic_default as the sub_arch
76 * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
77 */
78
79 if (!cmdline_apic && genapic == &apic_default) {
80 if (apic_bigsmp.probe()) {
81 genapic = &apic_bigsmp;
82 if (x86_quirks->update_genapic)
83 x86_quirks->update_genapic();
84 printk(KERN_INFO "Overriding APIC driver with %s\n",
85 genapic->name);
86 }
87 }
88#endif
89}
90
91void __init generic_apic_probe(void)
92{
93 if (!cmdline_apic) {
94 int i;
95 for (i = 0; apic_probe[i]; i++) {
96 if (apic_probe[i]->probe()) {
97 genapic = apic_probe[i];
98 break;
99 }
100 }
101 /* Not visible without early console */
102 if (!apic_probe[i])
103 panic("Didn't find an APIC driver");
104
105 if (x86_quirks->update_genapic)
106 x86_quirks->update_genapic();
107 }
108 printk(KERN_INFO "Using APIC driver %s\n", genapic->name);
109}
110
111/* These functions can switch the APIC even after the initial ->probe() */
112
113int __init mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
114{
115 int i;
116 for (i = 0; apic_probe[i]; ++i) {
117 if (apic_probe[i]->mps_oem_check(mpc, oem, productid)) {
118 if (!cmdline_apic) {
119 genapic = apic_probe[i];
120 if (x86_quirks->update_genapic)
121 x86_quirks->update_genapic();
122 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
123 genapic->name);
124 }
125 return 1;
126 }
127 }
128 return 0;
129}
130
131int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
132{
133 int i;
134 for (i = 0; apic_probe[i]; ++i) {
135 if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
136 if (!cmdline_apic) {
137 genapic = apic_probe[i];
138 if (x86_quirks->update_genapic)
139 x86_quirks->update_genapic();
140 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
141 genapic->name);
142 }
143 return 1;
144 }
145 }
146 return 0;
147}
148
149int hard_smp_processor_id(void)
150{
151 return genapic->get_apic_id(*(unsigned long *)(APIC_BASE+APIC_ID));
152}
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c
deleted file mode 100644
index 2821ffc188b5..000000000000
--- a/arch/x86/mach-generic/summit.c
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * APIC driver for the IBM "Summit" chipset.
3 */
4#define APIC_DEFINITION 1
5#include <linux/threads.h>
6#include <linux/cpumask.h>
7#include <asm/mpspec.h>
8#include <asm/genapic.h>
9#include <asm/fixmap.h>
10#include <asm/apicdef.h>
11#include <linux/kernel.h>
12#include <linux/string.h>
13#include <linux/init.h>
14#include <asm/summit/apicdef.h>
15#include <linux/smp.h>
16#include <asm/summit/apic.h>
17#include <asm/summit/ipi.h>
18#include <asm/summit/mpparse.h>
19#include <asm/mach-default/mach_wakecpu.h>
20
21static int probe_summit(void)
22{
23 /* probed later in mptable/ACPI hooks */
24 return 0;
25}
26
27static void vector_allocation_domain(int cpu, cpumask_t *retmask)
28{
29 /* Careful. Some cpus do not strictly honor the set of cpus
30 * specified in the interrupt destination when using lowest
31 * priority interrupt delivery mode.
32 *
33 * In particular there was a hyperthreading cpu observed to
34 * deliver interrupts to the wrong hyperthread when only one
35	 * hyperthread was specified in the interrupt destination.
36 */
37 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
38}
39
40struct genapic apic_summit = APIC_INIT("summit", probe_summit);
diff --git a/arch/x86/mach-rdc321x/Makefile b/arch/x86/mach-rdc321x/Makefile
deleted file mode 100644
index 8325b4ca431c..000000000000
--- a/arch/x86/mach-rdc321x/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1#
2# Makefile for the RDC321x specific parts of the kernel
3#
4obj-$(CONFIG_X86_RDC321X) := gpio.o platform.o
5
diff --git a/arch/x86/mach-rdc321x/gpio.c b/arch/x86/mach-rdc321x/gpio.c
deleted file mode 100644
index 247f33d3a407..000000000000
--- a/arch/x86/mach-rdc321x/gpio.c
+++ /dev/null
@@ -1,194 +0,0 @@
1/*
2 * GPIO support for RDC SoC R3210/R8610
3 *
4 * Copyright (C) 2007, Florian Fainelli <florian@openwrt.org>
5 * Copyright (C) 2008, Volker Weiss <dev@tintuc.de>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 */
22
23
24#include <linux/spinlock.h>
25#include <linux/io.h>
26#include <linux/types.h>
27#include <linux/module.h>
28
29#include <asm/gpio.h>
30#include <asm/mach-rdc321x/rdc321x_defs.h>
31
32
33/* spin lock to protect our private copies of the GPIO data registers plus
34   the access to the PCI conf registers. */
35static DEFINE_SPINLOCK(gpio_lock);
36
37/* copy of GPIO data registers */
38static u32 gpio_data_reg1;
39static u32 gpio_data_reg2;
40
41static u32 gpio_request_data[2];
42
43
44static inline void rdc321x_conf_write(unsigned addr, u32 value)
45{
46 outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
47 outl(value, RDC3210_CFGREG_DATA);
48}
49
50static inline void rdc321x_conf_or(unsigned addr, u32 value)
51{
52 outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
53 value |= inl(RDC3210_CFGREG_DATA);
54 outl(value, RDC3210_CFGREG_DATA);
55}
56
57static inline u32 rdc321x_conf_read(unsigned addr)
58{
59 outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
60
61 return inl(RDC3210_CFGREG_DATA);
62}
63
64/* configure pin as GPIO */
65static void rdc321x_configure_gpio(unsigned gpio)
66{
67 unsigned long flags;
68
69 spin_lock_irqsave(&gpio_lock, flags);
70 rdc321x_conf_or(gpio < 32
71 ? RDC321X_GPIO_CTRL_REG1 : RDC321X_GPIO_CTRL_REG2,
72 1 << (gpio & 0x1f));
73 spin_unlock_irqrestore(&gpio_lock, flags);
74}
75
76/* initially set up the 2 copies of the GPIO data registers.
77 This function must be called by the platform setup code. */
78void __init rdc321x_gpio_setup()
79{
80	/* this might not be what others (BIOS, bootloader, etc.)
81 wrote to these registers before, but it's a good guess. Still
82 better than just using 0xffffffff. */
83
84 gpio_data_reg1 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG1);
85 gpio_data_reg2 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG2);
86}
87
88/* determine if the GPIO number is valid */
89static inline int rdc321x_is_gpio(unsigned gpio)
90{
91 return gpio <= RDC321X_MAX_GPIO;
92}
93
94/* request GPIO */
95int rdc_gpio_request(unsigned gpio, const char *label)
96{
97 unsigned long flags;
98
99 if (!rdc321x_is_gpio(gpio))
100 return -EINVAL;
101
102 spin_lock_irqsave(&gpio_lock, flags);
103 if (gpio_request_data[(gpio & 0x20) ? 1 : 0] & (1 << (gpio & 0x1f)))
104 goto inuse;
105 gpio_request_data[(gpio & 0x20) ? 1 : 0] |= (1 << (gpio & 0x1f));
106 spin_unlock_irqrestore(&gpio_lock, flags);
107
108 return 0;
109inuse:
110 spin_unlock_irqrestore(&gpio_lock, flags);
111 return -EINVAL;
112}
113EXPORT_SYMBOL(rdc_gpio_request);
114
115/* release previously-claimed GPIO */
116void rdc_gpio_free(unsigned gpio)
117{
118 unsigned long flags;
119
120 if (!rdc321x_is_gpio(gpio))
121 return;
122
123 spin_lock_irqsave(&gpio_lock, flags);
124 gpio_request_data[(gpio & 0x20) ? 1 : 0] &= ~(1 << (gpio & 0x1f));
125 spin_unlock_irqrestore(&gpio_lock, flags);
126}
127EXPORT_SYMBOL(rdc_gpio_free);
128
129/* read GPIO pin */
130int rdc_gpio_get_value(unsigned gpio)
131{
132 u32 reg;
133 unsigned long flags;
134
135 spin_lock_irqsave(&gpio_lock, flags);
136 reg = rdc321x_conf_read(gpio < 32
137 ? RDC321X_GPIO_DATA_REG1 : RDC321X_GPIO_DATA_REG2);
138 spin_unlock_irqrestore(&gpio_lock, flags);
139
140 return (1 << (gpio & 0x1f)) & reg ? 1 : 0;
141}
142EXPORT_SYMBOL(rdc_gpio_get_value);
143
144/* set GPIO pin to value */
145void rdc_gpio_set_value(unsigned gpio, int value)
146{
147 unsigned long flags;
148 u32 reg;
149
150 reg = 1 << (gpio & 0x1f);
151 if (gpio < 32) {
152 spin_lock_irqsave(&gpio_lock, flags);
153 if (value)
154 gpio_data_reg1 |= reg;
155 else
156 gpio_data_reg1 &= ~reg;
157 rdc321x_conf_write(RDC321X_GPIO_DATA_REG1, gpio_data_reg1);
158 spin_unlock_irqrestore(&gpio_lock, flags);
159 } else {
160 spin_lock_irqsave(&gpio_lock, flags);
161 if (value)
162 gpio_data_reg2 |= reg;
163 else
164 gpio_data_reg2 &= ~reg;
165 rdc321x_conf_write(RDC321X_GPIO_DATA_REG2, gpio_data_reg2);
166 spin_unlock_irqrestore(&gpio_lock, flags);
167 }
168}
169EXPORT_SYMBOL(rdc_gpio_set_value);
170
171/* configure GPIO pin as input */
172int rdc_gpio_direction_input(unsigned gpio)
173{
174 if (!rdc321x_is_gpio(gpio))
175 return -EINVAL;
176
177 rdc321x_configure_gpio(gpio);
178
179 return 0;
180}
181EXPORT_SYMBOL(rdc_gpio_direction_input);
182
183/* configure GPIO pin as output and set value */
184int rdc_gpio_direction_output(unsigned gpio, int value)
185{
186 if (!rdc321x_is_gpio(gpio))
187 return -EINVAL;
188
189 gpio_set_value(gpio, value);
190 rdc321x_configure_gpio(gpio);
191
192 return 0;
193}
194EXPORT_SYMBOL(rdc_gpio_direction_output);
diff --git a/arch/x86/mach-rdc321x/platform.c b/arch/x86/mach-rdc321x/platform.c
deleted file mode 100644
index 4f4e50c3ad3b..000000000000
--- a/arch/x86/mach-rdc321x/platform.c
+++ /dev/null
@@ -1,69 +0,0 @@
1/*
2 * Generic RDC321x platform devices
3 *
4 * Copyright (C) 2007 Florian Fainelli <florian@openwrt.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the
18 * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
19 * Boston, MA 02110-1301, USA.
20 *
21 */
22
23#include <linux/init.h>
24#include <linux/kernel.h>
25#include <linux/list.h>
26#include <linux/device.h>
27#include <linux/platform_device.h>
28#include <linux/leds.h>
29
30#include <asm/gpio.h>
31
32/* LEDS */
33static struct gpio_led default_leds[] = {
34 { .name = "rdc:dmz", .gpio = 1, },
35};
36
37static struct gpio_led_platform_data rdc321x_led_data = {
38 .num_leds = ARRAY_SIZE(default_leds),
39 .leds = default_leds,
40};
41
42static struct platform_device rdc321x_leds = {
43 .name = "leds-gpio",
44 .id = -1,
45 .dev = {
46 .platform_data = &rdc321x_led_data,
47 }
48};
49
50/* Watchdog */
51static struct platform_device rdc321x_wdt = {
52 .name = "rdc321x-wdt",
53 .id = -1,
54 .num_resources = 0,
55};
56
57static struct platform_device *rdc321x_devs[] = {
58 &rdc321x_leds,
59 &rdc321x_wdt
60};
61
62static int __init rdc_board_setup(void)
63{
64 rdc321x_gpio_setup();
65
66 return platform_add_devices(rdc321x_devs, ARRAY_SIZE(rdc321x_devs));
67}
68
69arch_initcall(rdc_board_setup);
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index d914a7996a66..66b7eb57d8e4 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -9,6 +9,7 @@
9#include <asm/e820.h> 9#include <asm/e820.h>
10#include <asm/io.h> 10#include <asm/io.h>
11#include <asm/setup.h> 11#include <asm/setup.h>
12#include <asm/cpu.h>
12 13
13void __init pre_intr_init_hook(void) 14void __init pre_intr_init_hook(void)
14{ 15{
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 7ffcdeec4631..6f5a38c7f900 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -400,7 +400,7 @@ void __init find_smp_config(void)
400 VOYAGER_SUS_IN_CONTROL_PORT); 400 VOYAGER_SUS_IN_CONTROL_PORT);
401 401
402 current_thread_info()->cpu = boot_cpu_id; 402 current_thread_info()->cpu = boot_cpu_id;
403 x86_write_percpu(cpu_number, boot_cpu_id); 403 percpu_write(cpu_number, boot_cpu_id);
404} 404}
405 405
406/* 406/*
@@ -528,7 +528,6 @@ static void __init do_boot_cpu(__u8 cpu)
528 /* init_tasks (in sched.c) is indexed logically */ 528 /* init_tasks (in sched.c) is indexed logically */
529 stack_start.sp = (void *)idle->thread.sp; 529 stack_start.sp = (void *)idle->thread.sp;
530 530
531 init_gdt(cpu);
532 per_cpu(current_task, cpu) = idle; 531 per_cpu(current_task, cpu) = idle;
533 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); 532 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
534 irq_ctx_init(cpu); 533 irq_ctx_init(cpu);
@@ -1745,14 +1744,13 @@ static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
1745 1744
1746static void __cpuinit voyager_smp_prepare_boot_cpu(void) 1745static void __cpuinit voyager_smp_prepare_boot_cpu(void)
1747{ 1746{
1748 init_gdt(smp_processor_id()); 1747 int cpu = smp_processor_id();
1749 switch_to_new_gdt(); 1748 switch_to_new_gdt(cpu);
1750 1749
1751 cpu_online_map = cpumask_of_cpu(smp_processor_id()); 1750 cpu_online_map = cpumask_of_cpu(smp_processor_id());
1752 cpu_callout_map = cpumask_of_cpu(smp_processor_id()); 1751 cpu_callout_map = cpumask_of_cpu(smp_processor_id());
1753 cpu_callin_map = CPU_MASK_NONE; 1752 cpu_callin_map = CPU_MASK_NONE;
1754 cpu_present_map = cpumask_of_cpu(smp_processor_id()); 1753 cpu_present_map = cpumask_of_cpu(smp_processor_id());
1755
1756} 1754}
1757 1755
1758static int __cpuinit voyager_cpu_up(unsigned int cpu) 1756static int __cpuinit voyager_cpu_up(unsigned int cpu)
@@ -1779,7 +1777,6 @@ static void __init voyager_smp_cpus_done(unsigned int max_cpus)
1779void __init smp_setup_processor_id(void) 1777void __init smp_setup_processor_id(void)
1780{ 1778{
1781 current_thread_info()->cpu = hard_smp_processor_id(); 1779 current_thread_info()->cpu = hard_smp_processor_id();
1782 x86_write_percpu(cpu_number, hard_smp_processor_id());
1783} 1780}
1784 1781
1785static void voyager_send_call_func(const struct cpumask *callmask) 1782static void voyager_send_call_func(const struct cpumask *callmask)
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index d8cc96a2738f..2b938a384910 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,6 +1,8 @@
1obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ 1obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
2 pat.o pgtable.o gup.o 2 pat.o pgtable.o gup.o
3 3
4obj-$(CONFIG_SMP) += tlb.o
5
4obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o 6obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o
5 7
6obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 8obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 7e8db53528a7..61b41ca3b5a2 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -23,6 +23,12 @@ int fixup_exception(struct pt_regs *regs)
23 23
24 fixup = search_exception_tables(regs->ip); 24 fixup = search_exception_tables(regs->ip);
25 if (fixup) { 25 if (fixup) {
26 /* If fixup is less than 16, it means uaccess error */
27 if (fixup->fixup < 16) {
28 current_thread_info()->uaccess_err = -EFAULT;
29 regs->ip += fixup->fixup;
30 return 1;
31 }
26 regs->ip = fixup->fixup; 32 regs->ip = fixup->fixup;
27 return 1; 33 return 1;
28 } 34 }
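
The added branch gives small fixup values a second meaning: anything below 16 is not a landing address but a byte count to skip past the faulting instruction, with uaccess_err flagged so the caller can report -EFAULT later. A rough stand-alone sketch of that dispatch; the struct fields mirror the exception-table entry used above, everything else is illustrative:

#include <stdio.h>

struct fixup_entry {
	unsigned long insn;	/* address of the faulting instruction */
	unsigned long fixup;	/* landing address, or a skip count if < 16 */
};

/* Mirrors the logic added to fixup_exception() above. */
static unsigned long apply_fixup(unsigned long ip, const struct fixup_entry *e,
				 int *uaccess_err)
{
	if (e->fixup < 16) {
		*uaccess_err = 1;	/* the kernel stores -EFAULT in thread_info */
		return ip + e->fixup;	/* resume a few bytes past the fault */
	}
	return e->fixup;		/* classic case: jump to the fixup code */
}

int main(void)
{
	struct fixup_entry e = { 0x1000, 4 };
	int err = 0;

	printf("new ip %#lx, uaccess_err %d\n", apply_fixup(0x1000, &e, &err), err);
	return 0;
}
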
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c76ef1d701c9..2a9ea3aee493 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -26,6 +26,7 @@
26#include <linux/kprobes.h> 26#include <linux/kprobes.h>
27#include <linux/uaccess.h> 27#include <linux/uaccess.h>
28#include <linux/kdebug.h> 28#include <linux/kdebug.h>
29#include <linux/magic.h>
29 30
30#include <asm/system.h> 31#include <asm/system.h>
31#include <asm/desc.h> 32#include <asm/desc.h>
@@ -91,8 +92,8 @@ static inline int notify_page_fault(struct pt_regs *regs)
91 * 92 *
92 * Opcode checker based on code by Richard Brunner 93 * Opcode checker based on code by Richard Brunner
93 */ 94 */
94static int is_prefetch(struct pt_regs *regs, unsigned long addr, 95static int is_prefetch(struct pt_regs *regs, unsigned long error_code,
95 unsigned long error_code) 96 unsigned long addr)
96{ 97{
97 unsigned char *instr; 98 unsigned char *instr;
98 int scan_more = 1; 99 int scan_more = 1;
@@ -409,17 +410,16 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
409} 410}
410 411
411#ifdef CONFIG_X86_64 412#ifdef CONFIG_X86_64
412static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs, 413static noinline void pgtable_bad(struct pt_regs *regs,
413 unsigned long error_code) 414 unsigned long error_code, unsigned long address)
414{ 415{
415 unsigned long flags = oops_begin(); 416 unsigned long flags = oops_begin();
416 int sig = SIGKILL; 417 int sig = SIGKILL;
417 struct task_struct *tsk; 418 struct task_struct *tsk = current;
418 419
419 printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", 420 printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
420 current->comm, address); 421 tsk->comm, address);
421 dump_pagetable(address); 422 dump_pagetable(address);
422 tsk = current;
423 tsk->thread.cr2 = address; 423 tsk->thread.cr2 = address;
424 tsk->thread.trap_no = 14; 424 tsk->thread.trap_no = 14;
425 tsk->thread.error_code = error_code; 425 tsk->thread.error_code = error_code;
@@ -429,6 +429,196 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
429} 429}
430#endif 430#endif
431 431
432static noinline void no_context(struct pt_regs *regs,
433 unsigned long error_code, unsigned long address)
434{
435 struct task_struct *tsk = current;
436 unsigned long *stackend;
437
438#ifdef CONFIG_X86_64
439 unsigned long flags;
440 int sig;
441#endif
442
443 /* Are we prepared to handle this kernel fault? */
444 if (fixup_exception(regs))
445 return;
446
447 /*
448 * X86_32
449 * Valid to do another page fault here, because if this fault
450 * had been triggered by is_prefetch, fixup_exception would have
451 * handled it.
452 *
453 * X86_64
454 * Hall of shame of CPU/BIOS bugs.
455 */
456 if (is_prefetch(regs, error_code, address))
457 return;
458
459 if (is_errata93(regs, address))
460 return;
461
462 /*
463 * Oops. The kernel tried to access some bad page. We'll have to
464 * terminate things with extreme prejudice.
465 */
466#ifdef CONFIG_X86_32
467 bust_spinlocks(1);
468#else
469 flags = oops_begin();
470#endif
471
472 show_fault_oops(regs, error_code, address);
473
474 stackend = end_of_stack(tsk);
475 if (*stackend != STACK_END_MAGIC)
476 printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
477
478 tsk->thread.cr2 = address;
479 tsk->thread.trap_no = 14;
480 tsk->thread.error_code = error_code;
481
482#ifdef CONFIG_X86_32
483 die("Oops", regs, error_code);
484 bust_spinlocks(0);
485 do_exit(SIGKILL);
486#else
487 sig = SIGKILL;
488 if (__die("Oops", regs, error_code))
489 sig = 0;
490 /* Executive summary in case the body of the oops scrolled away */
491 printk(KERN_EMERG "CR2: %016lx\n", address);
492 oops_end(flags, regs, sig);
493#endif
494}
495
496static void __bad_area_nosemaphore(struct pt_regs *regs,
497 unsigned long error_code, unsigned long address,
498 int si_code)
499{
500 struct task_struct *tsk = current;
501
502 /* User mode accesses just cause a SIGSEGV */
503 if (error_code & PF_USER) {
504 /*
505 * It's possible to have interrupts off here.
506 */
507 local_irq_enable();
508
509 /*
510 * Valid to do another page fault here because this one came
511 * from user space.
512 */
513 if (is_prefetch(regs, error_code, address))
514 return;
515
516 if (is_errata100(regs, address))
517 return;
518
519 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
520 printk_ratelimit()) {
521 printk(
522 "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
523 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
524 tsk->comm, task_pid_nr(tsk), address,
525 (void *) regs->ip, (void *) regs->sp, error_code);
526 print_vma_addr(" in ", regs->ip);
527 printk("\n");
528 }
529
530 tsk->thread.cr2 = address;
531 /* Kernel addresses are always protection faults */
532 tsk->thread.error_code = error_code | (address >= TASK_SIZE);
533 tsk->thread.trap_no = 14;
534 force_sig_info_fault(SIGSEGV, si_code, address, tsk);
535 return;
536 }
537
538 if (is_f00f_bug(regs, address))
539 return;
540
541 no_context(regs, error_code, address);
542}
543
544static noinline void bad_area_nosemaphore(struct pt_regs *regs,
545 unsigned long error_code, unsigned long address)
546{
547 __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
548}
549
550static void __bad_area(struct pt_regs *regs,
551 unsigned long error_code, unsigned long address,
552 int si_code)
553{
554 struct mm_struct *mm = current->mm;
555
556 /*
557 * Something tried to access memory that isn't in our memory map..
558 * Fix it, but check if it's kernel or user first..
559 */
560 up_read(&mm->mmap_sem);
561
562 __bad_area_nosemaphore(regs, error_code, address, si_code);
563}
564
565static noinline void bad_area(struct pt_regs *regs,
566 unsigned long error_code, unsigned long address)
567{
568 __bad_area(regs, error_code, address, SEGV_MAPERR);
569}
570
571static noinline void bad_area_access_error(struct pt_regs *regs,
572 unsigned long error_code, unsigned long address)
573{
574 __bad_area(regs, error_code, address, SEGV_ACCERR);
575}
576
577/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
578static void out_of_memory(struct pt_regs *regs,
579 unsigned long error_code, unsigned long address)
580{
581 /*
582 * We ran out of memory, call the OOM killer, and return to userspace
583 * (which will retry the fault, or kill us if we got oom-killed).
584 */
585 up_read(&current->mm->mmap_sem);
586 pagefault_out_of_memory();
587}
588
589static void do_sigbus(struct pt_regs *regs,
590 unsigned long error_code, unsigned long address)
591{
592 struct task_struct *tsk = current;
593 struct mm_struct *mm = tsk->mm;
594
595 up_read(&mm->mmap_sem);
596
597 /* Kernel mode? Handle exceptions or die */
598 if (!(error_code & PF_USER))
599 no_context(regs, error_code, address);
600#ifdef CONFIG_X86_32
601 /* User space => ok to do another page fault */
602 if (is_prefetch(regs, error_code, address))
603 return;
604#endif
605 tsk->thread.cr2 = address;
606 tsk->thread.error_code = error_code;
607 tsk->thread.trap_no = 14;
608 force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
609}
610
611static noinline void mm_fault_error(struct pt_regs *regs,
612 unsigned long error_code, unsigned long address, unsigned int fault)
613{
614 if (fault & VM_FAULT_OOM)
615 out_of_memory(regs, error_code, address);
616 else if (fault & VM_FAULT_SIGBUS)
617 do_sigbus(regs, error_code, address);
618 else
619 BUG();
620}
621
432static int spurious_fault_check(unsigned long error_code, pte_t *pte) 622static int spurious_fault_check(unsigned long error_code, pte_t *pte)
433{ 623{
434 if ((error_code & PF_WRITE) && !pte_write(*pte)) 624 if ((error_code & PF_WRITE) && !pte_write(*pte))
@@ -448,8 +638,8 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
448 * There are no security implications to leaving a stale TLB when 638 * There are no security implications to leaving a stale TLB when
449 * increasing the permissions on a page. 639 * increasing the permissions on a page.
450 */ 640 */
451static int spurious_fault(unsigned long address, 641static noinline int spurious_fault(unsigned long error_code,
452 unsigned long error_code) 642 unsigned long address)
453{ 643{
454 pgd_t *pgd; 644 pgd_t *pgd;
455 pud_t *pud; 645 pud_t *pud;
@@ -494,7 +684,7 @@ static int spurious_fault(unsigned long address,
494 * 684 *
495 * This assumes no large pages in there. 685 * This assumes no large pages in there.
496 */ 686 */
497static int vmalloc_fault(unsigned long address) 687static noinline int vmalloc_fault(unsigned long address)
498{ 688{
499#ifdef CONFIG_X86_32 689#ifdef CONFIG_X86_32
500 unsigned long pgd_paddr; 690 unsigned long pgd_paddr;
@@ -573,6 +763,25 @@ static int vmalloc_fault(unsigned long address)
573 763
574int show_unhandled_signals = 1; 764int show_unhandled_signals = 1;
575 765
766static inline int access_error(unsigned long error_code, int write,
767 struct vm_area_struct *vma)
768{
769 if (write) {
770 /* write, present and write, not present */
771 if (unlikely(!(vma->vm_flags & VM_WRITE)))
772 return 1;
773 } else if (unlikely(error_code & PF_PROT)) {
774 /* read, present */
775 return 1;
776 } else {
777 /* read, not present */
778 if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
779 return 1;
780 }
781
782 return 0;
783}
784
576/* 785/*
577 * This routine handles page faults. It determines the address, 786 * This routine handles page faults. It determines the address,
578 * and the problem, and then passes it off to one of the appropriate 787 * and the problem, and then passes it off to one of the appropriate
@@ -583,16 +792,12 @@ asmlinkage
583#endif 792#endif
584void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) 793void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
585{ 794{
795 unsigned long address;
586 struct task_struct *tsk; 796 struct task_struct *tsk;
587 struct mm_struct *mm; 797 struct mm_struct *mm;
588 struct vm_area_struct *vma; 798 struct vm_area_struct *vma;
589 unsigned long address; 799 int write;
590 int write, si_code;
591 int fault; 800 int fault;
592#ifdef CONFIG_X86_64
593 unsigned long flags;
594 int sig;
595#endif
596 801
597 tsk = current; 802 tsk = current;
598 mm = tsk->mm; 803 mm = tsk->mm;
@@ -601,8 +806,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
601 /* get the address */ 806 /* get the address */
602 address = read_cr2(); 807 address = read_cr2();
603 808
604 si_code = SEGV_MAPERR;
605
606 if (unlikely(kmmio_fault(regs, address))) 809 if (unlikely(kmmio_fault(regs, address)))
607 return; 810 return;
608 811
@@ -629,7 +832,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
629 return; 832 return;
630 833
631 /* Can handle a stale RO->RW TLB */ 834 /* Can handle a stale RO->RW TLB */
632 if (spurious_fault(address, error_code)) 835 if (spurious_fault(error_code, address))
633 return; 836 return;
634 837
635 /* kprobes don't want to hook the spurious faults. */ 838 /* kprobes don't want to hook the spurious faults. */
@@ -639,13 +842,12 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
639 * Don't take the mm semaphore here. If we fixup a prefetch 842 * Don't take the mm semaphore here. If we fixup a prefetch
640 * fault we could otherwise deadlock. 843 * fault we could otherwise deadlock.
641 */ 844 */
642 goto bad_area_nosemaphore; 845 bad_area_nosemaphore(regs, error_code, address);
846 return;
643 } 847 }
644 848
645 /* kprobes don't want to hook the spurious faults. */ 849 if (unlikely(notify_page_fault(regs)))
646 if (notify_page_fault(regs))
647 return; 850 return;
648
649 /* 851 /*
650 * It's safe to allow irq's after cr2 has been saved and the 852 * It's safe to allow irq's after cr2 has been saved and the
651 * vmalloc fault has been handled. 853 * vmalloc fault has been handled.
@@ -661,15 +863,17 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
661 863
662#ifdef CONFIG_X86_64 864#ifdef CONFIG_X86_64
663 if (unlikely(error_code & PF_RSVD)) 865 if (unlikely(error_code & PF_RSVD))
664 pgtable_bad(address, regs, error_code); 866 pgtable_bad(regs, error_code, address);
665#endif 867#endif
666 868
667 /* 869 /*
668 * If we're in an interrupt, have no user context or are running in an 870 * If we're in an interrupt, have no user context or are running in an
669 * atomic region then we must not take the fault. 871 * atomic region then we must not take the fault.
670 */ 872 */
671 if (unlikely(in_atomic() || !mm)) 873 if (unlikely(in_atomic() || !mm)) {
672 goto bad_area_nosemaphore; 874 bad_area_nosemaphore(regs, error_code, address);
875 return;
876 }
673 877
674 /* 878 /*
675 * When running in the kernel we expect faults to occur only to 879 * When running in the kernel we expect faults to occur only to
@@ -687,20 +891,26 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
687 * source. If this is invalid we can skip the address space check, 891 * source. If this is invalid we can skip the address space check,
688 * thus avoiding the deadlock. 892 * thus avoiding the deadlock.
689 */ 893 */
690 if (!down_read_trylock(&mm->mmap_sem)) { 894 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
691 if ((error_code & PF_USER) == 0 && 895 if ((error_code & PF_USER) == 0 &&
692 !search_exception_tables(regs->ip)) 896 !search_exception_tables(regs->ip)) {
693 goto bad_area_nosemaphore; 897 bad_area_nosemaphore(regs, error_code, address);
898 return;
899 }
694 down_read(&mm->mmap_sem); 900 down_read(&mm->mmap_sem);
695 } 901 }
696 902
697 vma = find_vma(mm, address); 903 vma = find_vma(mm, address);
698 if (!vma) 904 if (unlikely(!vma)) {
699 goto bad_area; 905 bad_area(regs, error_code, address);
700 if (vma->vm_start <= address) 906 return;
907 }
908 if (likely(vma->vm_start <= address))
701 goto good_area; 909 goto good_area;
702 if (!(vma->vm_flags & VM_GROWSDOWN)) 910 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
703 goto bad_area; 911 bad_area(regs, error_code, address);
912 return;
913 }
704 if (error_code & PF_USER) { 914 if (error_code & PF_USER) {
705 /* 915 /*
706 * Accessing the stack below %sp is always a bug. 916 * Accessing the stack below %sp is always a bug.
@@ -708,31 +918,25 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
708 * and pusha to work. ("enter $65535,$31" pushes 918 * and pusha to work. ("enter $65535,$31" pushes
709 * 32 pointers and then decrements %sp by 65535.) 919 * 32 pointers and then decrements %sp by 65535.)
710 */ 920 */
711 if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp) 921 if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
712 goto bad_area; 922 bad_area(regs, error_code, address);
923 return;
924 }
713 } 925 }
714 if (expand_stack(vma, address)) 926 if (unlikely(expand_stack(vma, address))) {
715 goto bad_area; 927 bad_area(regs, error_code, address);
716/* 928 return;
717 * Ok, we have a good vm_area for this memory access, so 929 }
718 * we can handle it.. 930
719 */ 931 /*
932 * Ok, we have a good vm_area for this memory access, so
933 * we can handle it..
934 */
720good_area: 935good_area:
721 si_code = SEGV_ACCERR; 936 write = error_code & PF_WRITE;
722 write = 0; 937 if (unlikely(access_error(error_code, write, vma))) {
723 switch (error_code & (PF_PROT|PF_WRITE)) { 938 bad_area_access_error(regs, error_code, address);
724 default: /* 3: write, present */ 939 return;
725 /* fall through */
726 case PF_WRITE: /* write, not present */
727 if (!(vma->vm_flags & VM_WRITE))
728 goto bad_area;
729 write++;
730 break;
731 case PF_PROT: /* read, present */
732 goto bad_area;
733 case 0: /* read, not present */
734 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
735 goto bad_area;
736 } 940 }
737 941
738 /* 942 /*
@@ -742,11 +946,8 @@ good_area:
742 */ 946 */
743 fault = handle_mm_fault(mm, vma, address, write); 947 fault = handle_mm_fault(mm, vma, address, write);
744 if (unlikely(fault & VM_FAULT_ERROR)) { 948 if (unlikely(fault & VM_FAULT_ERROR)) {
745 if (fault & VM_FAULT_OOM) 949 mm_fault_error(regs, error_code, address, fault);
746 goto out_of_memory; 950 return;
747 else if (fault & VM_FAULT_SIGBUS)
748 goto do_sigbus;
749 BUG();
750 } 951 }
751 if (fault & VM_FAULT_MAJOR) 952 if (fault & VM_FAULT_MAJOR)
752 tsk->maj_flt++; 953 tsk->maj_flt++;
@@ -764,128 +965,6 @@ good_area:
764 } 965 }
765#endif 966#endif
766 up_read(&mm->mmap_sem); 967 up_read(&mm->mmap_sem);
767 return;
768
769/*
770 * Something tried to access memory that isn't in our memory map..
771 * Fix it, but check if it's kernel or user first..
772 */
773bad_area:
774 up_read(&mm->mmap_sem);
775
776bad_area_nosemaphore:
777 /* User mode accesses just cause a SIGSEGV */
778 if (error_code & PF_USER) {
779 /*
780 * It's possible to have interrupts off here.
781 */
782 local_irq_enable();
783
784 /*
785 * Valid to do another page fault here because this one came
786 * from user space.
787 */
788 if (is_prefetch(regs, address, error_code))
789 return;
790
791 if (is_errata100(regs, address))
792 return;
793
794 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
795 printk_ratelimit()) {
796 printk(
797 "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
798 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
799 tsk->comm, task_pid_nr(tsk), address,
800 (void *) regs->ip, (void *) regs->sp, error_code);
801 print_vma_addr(" in ", regs->ip);
802 printk("\n");
803 }
804
805 tsk->thread.cr2 = address;
806 /* Kernel addresses are always protection faults */
807 tsk->thread.error_code = error_code | (address >= TASK_SIZE);
808 tsk->thread.trap_no = 14;
809 force_sig_info_fault(SIGSEGV, si_code, address, tsk);
810 return;
811 }
812
813 if (is_f00f_bug(regs, address))
814 return;
815
816no_context:
817 /* Are we prepared to handle this kernel fault? */
818 if (fixup_exception(regs))
819 return;
820
821 /*
822 * X86_32
823 * Valid to do another page fault here, because if this fault
824 * had been triggered by is_prefetch fixup_exception would have
825 * handled it.
826 *
827 * X86_64
828 * Hall of shame of CPU/BIOS bugs.
829 */
830 if (is_prefetch(regs, address, error_code))
831 return;
832
833 if (is_errata93(regs, address))
834 return;
835
836/*
837 * Oops. The kernel tried to access some bad page. We'll have to
838 * terminate things with extreme prejudice.
839 */
840#ifdef CONFIG_X86_32
841 bust_spinlocks(1);
842#else
843 flags = oops_begin();
844#endif
845
846 show_fault_oops(regs, error_code, address);
847
848 tsk->thread.cr2 = address;
849 tsk->thread.trap_no = 14;
850 tsk->thread.error_code = error_code;
851
852#ifdef CONFIG_X86_32
853 die("Oops", regs, error_code);
854 bust_spinlocks(0);
855 do_exit(SIGKILL);
856#else
857 sig = SIGKILL;
858 if (__die("Oops", regs, error_code))
859 sig = 0;
860 /* Executive summary in case the body of the oops scrolled away */
861 printk(KERN_EMERG "CR2: %016lx\n", address);
862 oops_end(flags, regs, sig);
863#endif
864
865out_of_memory:
866 /*
867 * We ran out of memory, call the OOM killer, and return to userspace
868 * (which will retry the fault, or kill us if we got oom-killed).
869 */
870 up_read(&mm->mmap_sem);
871 pagefault_out_of_memory();
872 return;
873
874do_sigbus:
875 up_read(&mm->mmap_sem);
876
877 /* Kernel mode? Handle exceptions or die */
878 if (!(error_code & PF_USER))
879 goto no_context;
880#ifdef CONFIG_X86_32
881 /* User space => ok to do another page fault */
882 if (is_prefetch(regs, address, error_code))
883 return;
884#endif
885 tsk->thread.cr2 = address;
886 tsk->thread.error_code = error_code;
887 tsk->thread.trap_no = 14;
888 force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
889} 968}
890 969
891DEFINE_SPINLOCK(pgd_lock); 970DEFINE_SPINLOCK(pgd_lock);
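
Most of the new helpers key off the hardware error code, and access_error() above replaces the old switch on (PF_PROT|PF_WRITE). Assuming the usual x86 encoding (bit 0 = protection fault on a present page, bit 1 = write, bit 2 = user mode, bit 3 = reserved bit set), the access check can be exercised on its own:

#include <stdio.h>

#define PF_PROT		(1 << 0)	/* fault on a present page */
#define PF_WRITE	(1 << 1)	/* write access */

#define VM_READ		0x1
#define VM_WRITE	0x2
#define VM_EXEC		0x4

/* Same decision as access_error() in the hunk above: nonzero means SIGSEGV
 * with SEGV_ACCERR, zero means the fault may be handled normally. */
static int access_error(unsigned long error_code, int write, unsigned long vm_flags)
{
	if (write)
		return !(vm_flags & VM_WRITE);			/* write, vma not writable */
	if (error_code & PF_PROT)
		return 1;					/* read of a present page  */
	return !(vm_flags & (VM_READ | VM_EXEC | VM_WRITE));	/* read, not present       */
}

int main(void)
{
	printf("%d\n", access_error(PF_WRITE, 1, VM_READ));		/* 1: bad write  */
	printf("%d\n", access_error(0, 0, VM_READ | VM_WRITE));	/* 0: fine read  */
	printf("%d\n", access_error(PF_PROT, 0, VM_READ));		/* 1: protection */
	return 0;
}
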
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 2cef05074413..00263bf07a88 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -49,7 +49,6 @@
49#include <asm/paravirt.h> 49#include <asm/paravirt.h>
50#include <asm/setup.h> 50#include <asm/setup.h>
51#include <asm/cacheflush.h> 51#include <asm/cacheflush.h>
52#include <asm/smp.h>
53 52
54unsigned int __VMALLOC_RESERVE = 128 << 20; 53unsigned int __VMALLOC_RESERVE = 128 << 20;
55 54
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index af750ab973b6..1448bcb7f22f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -367,7 +367,7 @@ EXPORT_SYMBOL(ioremap_nocache);
367 * 367 *
368 * Must be freed with iounmap. 368 * Must be freed with iounmap.
369 */ 369 */
370void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size) 370void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
371{ 371{
372 if (pat_enabled) 372 if (pat_enabled)
373 return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC, 373 return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 56fe7124fbec..165829600566 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -4,7 +4,7 @@
4 * Based on code by Ingo Molnar and Andi Kleen, copyrighted 4 * Based on code by Ingo Molnar and Andi Kleen, copyrighted
5 * as follows: 5 * as follows:
6 * 6 *
7 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. 7 * Copyright 2003-2009 Red Hat Inc.
8 * All Rights Reserved. 8 * All Rights Reserved.
9 * Copyright 2005 Andi Kleen, SUSE Labs. 9 * Copyright 2005 Andi Kleen, SUSE Labs.
10 * Copyright 2007 Jiri Kosina, SUSE Labs. 10 * Copyright 2007 Jiri Kosina, SUSE Labs.
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 71a14f89f89e..08d140fbc31b 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -20,6 +20,12 @@
20#include <asm/acpi.h> 20#include <asm/acpi.h>
21#include <asm/k8.h> 21#include <asm/k8.h>
22 22
23#ifdef CONFIG_DEBUG_PER_CPU_MAPS
24# define DBG(x...) printk(KERN_DEBUG x)
25#else
26# define DBG(x...)
27#endif
28
23struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; 29struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
24EXPORT_SYMBOL(node_data); 30EXPORT_SYMBOL(node_data);
25 31
@@ -33,6 +39,21 @@ int numa_off __initdata;
33static unsigned long __initdata nodemap_addr; 39static unsigned long __initdata nodemap_addr;
34static unsigned long __initdata nodemap_size; 40static unsigned long __initdata nodemap_size;
35 41
42DEFINE_PER_CPU(int, node_number) = 0;
43EXPORT_PER_CPU_SYMBOL(node_number);
44
45/*
46 * Map cpu index to node index
47 */
48DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
49EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
50
51/*
52 * Which logical CPUs are on which nodes
53 */
54cpumask_t *node_to_cpumask_map;
55EXPORT_SYMBOL(node_to_cpumask_map);
56
36/* 57/*
37 * Given a shift value, try to populate memnodemap[] 58 * Given a shift value, try to populate memnodemap[]
38 * Returns : 59 * Returns :
@@ -640,3 +661,199 @@ void __init init_cpu_to_node(void)
640#endif 661#endif
641 662
642 663
664/*
665 * Allocate node_to_cpumask_map based on number of available nodes
666 * Requires node_possible_map to be valid.
667 *
668 * Note: node_to_cpumask() is not valid until after this is done.
669 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
670 */
671void __init setup_node_to_cpumask_map(void)
672{
673 unsigned int node, num = 0;
674 cpumask_t *map;
675
676 /* setup nr_node_ids if not done yet */
677 if (nr_node_ids == MAX_NUMNODES) {
678 for_each_node_mask(node, node_possible_map)
679 num = node;
680 nr_node_ids = num + 1;
681 }
682
683 /* allocate the map */
684 map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
685 DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);
686
687 pr_debug("Node to cpumask map at %p for %d nodes\n",
688 map, nr_node_ids);
689
690 /* node_to_cpumask() will now work */
691 node_to_cpumask_map = map;
692}
693
694void __cpuinit numa_set_node(int cpu, int node)
695{
696 int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
697
698 /* early setting, no percpu area yet */
699 if (cpu_to_node_map) {
700 cpu_to_node_map[cpu] = node;
701 return;
702 }
703
704#ifdef CONFIG_DEBUG_PER_CPU_MAPS
705 if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
706 printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
707 dump_stack();
708 return;
709 }
710#endif
711 per_cpu(x86_cpu_to_node_map, cpu) = node;
712
713 if (node != NUMA_NO_NODE)
714 per_cpu(node_number, cpu) = node;
715}
716
717void __cpuinit numa_clear_node(int cpu)
718{
719 numa_set_node(cpu, NUMA_NO_NODE);
720}
721
722#ifndef CONFIG_DEBUG_PER_CPU_MAPS
723
724void __cpuinit numa_add_cpu(int cpu)
725{
726 cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
727}
728
729void __cpuinit numa_remove_cpu(int cpu)
730{
731 cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
732}
733
734#else /* CONFIG_DEBUG_PER_CPU_MAPS */
735
736/*
737 * --------- debug versions of the numa functions ---------
738 */
739static void __cpuinit numa_set_cpumask(int cpu, int enable)
740{
741 int node = early_cpu_to_node(cpu);
742 cpumask_t *mask;
743 char buf[64];
744
745 if (node_to_cpumask_map == NULL) {
746 printk(KERN_ERR "node_to_cpumask_map NULL\n");
747 dump_stack();
748 return;
749 }
750
751 mask = &node_to_cpumask_map[node];
752 if (enable)
753 cpu_set(cpu, *mask);
754 else
755 cpu_clear(cpu, *mask);
756
757 cpulist_scnprintf(buf, sizeof(buf), mask);
758 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
759 enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
760}
761
762void __cpuinit numa_add_cpu(int cpu)
763{
764 numa_set_cpumask(cpu, 1);
765}
766
767void __cpuinit numa_remove_cpu(int cpu)
768{
769 numa_set_cpumask(cpu, 0);
770}
771
772int cpu_to_node(int cpu)
773{
774 if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
775 printk(KERN_WARNING
776 "cpu_to_node(%d): usage too early!\n", cpu);
777 dump_stack();
778 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
779 }
780 return per_cpu(x86_cpu_to_node_map, cpu);
781}
782EXPORT_SYMBOL(cpu_to_node);
783
784/*
785 * Same function as cpu_to_node() but used if called before the
786 * per_cpu areas are setup.
787 */
788int early_cpu_to_node(int cpu)
789{
790 if (early_per_cpu_ptr(x86_cpu_to_node_map))
791 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
792
793 if (!per_cpu_offset(cpu)) {
794 printk(KERN_WARNING
795 "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
796 dump_stack();
797 return NUMA_NO_NODE;
798 }
799 return per_cpu(x86_cpu_to_node_map, cpu);
800}
801
802
803/* empty cpumask */
804static const cpumask_t cpu_mask_none;
805
806/*
807 * Returns a pointer to the bitmask of CPUs on Node 'node'.
808 */
809const cpumask_t *cpumask_of_node(int node)
810{
811 if (node_to_cpumask_map == NULL) {
812 printk(KERN_WARNING
813 "cpumask_of_node(%d): no node_to_cpumask_map!\n",
814 node);
815 dump_stack();
816 return (const cpumask_t *)&cpu_online_map;
817 }
818 if (node >= nr_node_ids) {
819 printk(KERN_WARNING
820 "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
821 node, nr_node_ids);
822 dump_stack();
823 return &cpu_mask_none;
824 }
825 return &node_to_cpumask_map[node];
826}
827EXPORT_SYMBOL(cpumask_of_node);
828
829/*
830 * Returns a bitmask of CPUs on Node 'node'.
831 *
832 * Side note: this function creates the returned cpumask on the stack
833 * so with a high NR_CPUS count, excessive stack space is used. The
834 * node_to_cpumask_ptr function should be used whenever possible.
835 */
836cpumask_t node_to_cpumask(int node)
837{
838 if (node_to_cpumask_map == NULL) {
839 printk(KERN_WARNING
840 "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
841 dump_stack();
842 return cpu_online_map;
843 }
844 if (node >= nr_node_ids) {
845 printk(KERN_WARNING
846 "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
847 node, nr_node_ids);
848 dump_stack();
849 return cpu_mask_none;
850 }
851 return node_to_cpumask_map[node];
852}
853EXPORT_SYMBOL(node_to_cpumask);
854
855/*
856 * --------- end of debug versions of the numa functions ---------
857 */
858
859#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
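
setup_node_to_cpumask_map() above sizes the map by taking the highest node number in node_possible_map plus one, when nr_node_ids has not been set yet. The same sizing rule, reduced to a plain bitmask so it can run stand-alone:

#include <stdio.h>

/* nr_node_ids = (highest possible node) + 1, as in the loop over
 * node_possible_map above; a plain unsigned long stands in for nodemask_t. */
static unsigned nr_node_ids_from(unsigned long possible_mask)
{
	unsigned node, num = 0;

	for (node = 0; node < 8 * sizeof(possible_mask); node++)
		if (possible_mask & (1UL << node))
			num = node;
	return num + 1;
}

int main(void)
{
	printf("%u\n", nr_node_ids_from(0x01));	/* node 0 only      -> 1 */
	printf("%u\n", nr_node_ids_from(0x13));	/* nodes 0, 1 and 4 -> 5 */
	return 0;
}
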
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 7b61036427df..9127e31c7268 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -30,7 +30,7 @@
30#ifdef CONFIG_X86_PAT 30#ifdef CONFIG_X86_PAT
31int __read_mostly pat_enabled = 1; 31int __read_mostly pat_enabled = 1;
32 32
33void __cpuinit pat_disable(char *reason) 33void __cpuinit pat_disable(const char *reason)
34{ 34{
35 pat_enabled = 0; 35 pat_enabled = 0;
36 printk(KERN_INFO "%s\n", reason); 36 printk(KERN_INFO "%s\n", reason);
@@ -42,6 +42,11 @@ static int __init nopat(char *str)
42 return 0; 42 return 0;
43} 43}
44early_param("nopat", nopat); 44early_param("nopat", nopat);
45#else
46static inline void pat_disable(const char *reason)
47{
48 (void)reason;
49}
45#endif 50#endif
46 51
47 52
@@ -78,16 +83,20 @@ void pat_init(void)
78 if (!pat_enabled) 83 if (!pat_enabled)
79 return; 84 return;
80 85
81 /* Paranoia check. */ 86 if (!cpu_has_pat) {
82 if (!cpu_has_pat && boot_pat_state) { 87 if (!boot_pat_state) {
83 /* 88 pat_disable("PAT not supported by CPU.");
84 * If this happens we are on a secondary CPU, but 89 return;
85 * switched to PAT on the boot CPU. We have no way to 90 } else {
86 * undo PAT. 91 /*
87 */ 92 * If this happens we are on a secondary CPU, but
88 printk(KERN_ERR "PAT enabled, " 93 * switched to PAT on the boot CPU. We have no way to
89 "but not supported by secondary CPU\n"); 94 * undo PAT.
90 BUG(); 95 */
96 printk(KERN_ERR "PAT enabled, "
97 "but not supported by secondary CPU\n");
98 BUG();
99 }
91 } 100 }
92 101
93 /* Set PWT to Write-Combining. All other bits stay the same */ 102 /* Set PWT to Write-Combining. All other bits stay the same */
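
The restructured check above separates two cases when the CPU lacks PAT: on the boot CPU boot_pat_state is still zero, so PAT can simply be disabled; on a secondary CPU the boot CPU has already switched PAT on and there is no way back, hence the BUG(). A compressed sketch of that decision with the kernel globals turned into parameters (the return codes are invented for the illustration):

#include <stdio.h>

/* 0: PAT usable, go on to program the MSR
 * 1: PAT disabled (not enabled, or boot CPU without PAT support)
 * 2: fatal - secondary CPU without PAT after the boot CPU enabled it */
static int pat_init_check(int pat_enabled, int cpu_has_pat, unsigned long boot_pat_state)
{
	if (!pat_enabled)
		return 1;
	if (!cpu_has_pat) {
		if (!boot_pat_state)
			return 1;	/* boot CPU: pat_disable("PAT not supported by CPU.") */
		return 2;		/* secondary CPU: nothing left to do but BUG() */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", pat_init_check(1, 0, 0));	/* 1: disable on boot CPU        */
	printf("%d\n", pat_init_check(1, 0, 1));	/* 2: any recorded boot value    */
	printf("%d\n", pat_init_check(1, 1, 0));	/* 0: supported, program the MSR */
	return 0;
}
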
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 09737c8af074..15df1baee100 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -21,6 +21,7 @@
21#include <asm/numa.h> 21#include <asm/numa.h>
22#include <asm/e820.h> 22#include <asm/e820.h>
23#include <asm/genapic.h> 23#include <asm/genapic.h>
24#include <asm/uv/uv.h>
24 25
25int acpi_numa __initdata; 26int acpi_numa __initdata;
26 27
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/mm/tlb.c
index f8be6f1d2e48..14c5af4d11e6 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/mm/tlb.c
@@ -1,24 +1,20 @@
1#include <linux/init.h> 1#include <linux/init.h>
2 2
3#include <linux/mm.h> 3#include <linux/mm.h>
4#include <linux/delay.h>
5#include <linux/spinlock.h> 4#include <linux/spinlock.h>
6#include <linux/smp.h> 5#include <linux/smp.h>
7#include <linux/kernel_stat.h>
8#include <linux/mc146818rtc.h>
9#include <linux/interrupt.h> 6#include <linux/interrupt.h>
7#include <linux/module.h>
10 8
11#include <asm/mtrr.h>
12#include <asm/pgalloc.h>
13#include <asm/tlbflush.h> 9#include <asm/tlbflush.h>
14#include <asm/mmu_context.h> 10#include <asm/mmu_context.h>
15#include <asm/proto.h> 11#include <asm/apic.h>
16#include <asm/apicdef.h> 12#include <asm/uv/uv.h>
17#include <asm/idle.h>
18#include <asm/uv/uv_hub.h>
19#include <asm/uv/uv_bau.h>
20 13
21#include <mach_ipi.h> 14DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
15 = { &init_mm, 0, };
16
17#include <asm/genapic.h>
22/* 18/*
23 * Smarter SMP flushing macros. 19 * Smarter SMP flushing macros.
24 * c/o Linus Torvalds. 20 * c/o Linus Torvalds.
@@ -33,7 +29,7 @@
33 * To avoid global state use 8 different call vectors. 29 * To avoid global state use 8 different call vectors.
34 * Each CPU uses a specific vector to trigger flushes on other 30 * Each CPU uses a specific vector to trigger flushes on other
35 * CPUs. Depending on the received vector the target CPUs look into 31 * CPUs. Depending on the received vector the target CPUs look into
36 * the right per cpu variable for the flush data. 32 * the right array slot for the flush data.
37 * 33 *
38 * With more than 8 CPUs they are hashed to the 8 available 34 * With more than 8 CPUs they are hashed to the 8 available
39 * vectors. The limited global vector space forces us to this right now. 35 * vectors. The limited global vector space forces us to this right now.
@@ -43,18 +39,18 @@
43 39
44union smp_flush_state { 40union smp_flush_state {
45 struct { 41 struct {
46 cpumask_t flush_cpumask;
47 struct mm_struct *flush_mm; 42 struct mm_struct *flush_mm;
48 unsigned long flush_va; 43 unsigned long flush_va;
49 spinlock_t tlbstate_lock; 44 spinlock_t tlbstate_lock;
45 DECLARE_BITMAP(flush_cpumask, NR_CPUS);
50 }; 46 };
51 char pad[SMP_CACHE_BYTES]; 47 char pad[CONFIG_X86_INTERNODE_CACHE_BYTES];
52} ____cacheline_aligned; 48} ____cacheline_internodealigned_in_smp;
53 49
54/* State is put into the per CPU data section, but padded 50/* State is put into the per CPU data section, but padded
55 to a full cache line because other CPUs can access it and we don't 51 to a full cache line because other CPUs can access it and we don't
56 want false sharing in the per cpu data segment. */ 52 want false sharing in the per cpu data segment. */
57static DEFINE_PER_CPU(union smp_flush_state, flush_state); 53static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];
58 54
59/* 55/*
60 * We cannot call mmdrop() because we are in interrupt context, 56 * We cannot call mmdrop() because we are in interrupt context,
@@ -62,9 +58,9 @@ static DEFINE_PER_CPU(union smp_flush_state, flush_state);
62 */ 58 */
63void leave_mm(int cpu) 59void leave_mm(int cpu)
64{ 60{
65 if (read_pda(mmu_state) == TLBSTATE_OK) 61 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
66 BUG(); 62 BUG();
67 cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask); 63 cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
68 load_cr3(swapper_pg_dir); 64 load_cr3(swapper_pg_dir);
69} 65}
70EXPORT_SYMBOL_GPL(leave_mm); 66EXPORT_SYMBOL_GPL(leave_mm);
@@ -117,10 +113,20 @@ EXPORT_SYMBOL_GPL(leave_mm);
117 * Interrupts are disabled. 113 * Interrupts are disabled.
118 */ 114 */
119 115
120asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs) 116/*
117 * FIXME: use of asmlinkage is not consistent. On x86_64 it's a noop
118 * kept only for documentation purposes, but the usage is slightly
119 * inconsistent. On x86_32, asmlinkage is regparm(0), but the interrupt
120 * entry calls in with the first parameter in %eax. Maybe define
121 * intrlinkage?
122 */
123#ifdef CONFIG_X86_64
124asmlinkage
125#endif
126void smp_invalidate_interrupt(struct pt_regs *regs)
121{ 127{
122 int cpu; 128 unsigned int cpu;
123 int sender; 129 unsigned int sender;
124 union smp_flush_state *f; 130 union smp_flush_state *f;
125 131
126 cpu = smp_processor_id(); 132 cpu = smp_processor_id();
@@ -129,9 +135,9 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
129 * Use that to determine where the sender put the data. 135 * Use that to determine where the sender put the data.
130 */ 136 */
131 sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START; 137 sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
132 f = &per_cpu(flush_state, sender); 138 f = &flush_state[sender];
133 139
134 if (!cpu_isset(cpu, f->flush_cpumask)) 140 if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
135 goto out; 141 goto out;
136 /* 142 /*
137 * This was a BUG() but until someone can quote me the 143 * This was a BUG() but until someone can quote me the
@@ -142,8 +148,8 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
142 * BUG(); 148 * BUG();
143 */ 149 */
144 150
145 if (f->flush_mm == read_pda(active_mm)) { 151 if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
146 if (read_pda(mmu_state) == TLBSTATE_OK) { 152 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
147 if (f->flush_va == TLB_FLUSH_ALL) 153 if (f->flush_va == TLB_FLUSH_ALL)
148 local_flush_tlb(); 154 local_flush_tlb();
149 else 155 else
@@ -153,23 +159,21 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
153 } 159 }
154out: 160out:
155 ack_APIC_irq(); 161 ack_APIC_irq();
156 cpu_clear(cpu, f->flush_cpumask); 162 smp_mb__before_clear_bit();
163 cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
164 smp_mb__after_clear_bit();
157 inc_irq_stat(irq_tlb_count); 165 inc_irq_stat(irq_tlb_count);
158} 166}
159 167
160void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, 168static void flush_tlb_others_ipi(const struct cpumask *cpumask,
161 unsigned long va) 169 struct mm_struct *mm, unsigned long va)
162{ 170{
163 int sender; 171 unsigned int sender;
164 union smp_flush_state *f; 172 union smp_flush_state *f;
165 cpumask_t cpumask = *cpumaskp;
166
167 if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
168 return;
169 173
170 /* Caller has disabled preemption */ 174 /* Caller has disabled preemption */
171 sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS; 175 sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
172 f = &per_cpu(flush_state, sender); 176 f = &flush_state[sender];
173 177
174 /* 178 /*
175 * Could avoid this lock when 179 * Could avoid this lock when
@@ -180,7 +184,8 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
180 184
181 f->flush_mm = mm; 185 f->flush_mm = mm;
182 f->flush_va = va; 186 f->flush_va = va;
183 cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask); 187 cpumask_andnot(to_cpumask(f->flush_cpumask),
188 cpumask, cpumask_of(smp_processor_id()));
184 189
185 /* 190 /*
186 * Make the above memory operations globally visible before 191 * Make the above memory operations globally visible before
@@ -191,9 +196,10 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
191 * We have to send the IPI only to 196 * We have to send the IPI only to
192 * CPUs affected. 197 * CPUs affected.
193 */ 198 */
194 send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender); 199 apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
200 INVALIDATE_TLB_VECTOR_START + sender);
195 201
196 while (!cpus_empty(f->flush_cpumask)) 202 while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
197 cpu_relax(); 203 cpu_relax();
198 204
199 f->flush_mm = NULL; 205 f->flush_mm = NULL;
@@ -201,12 +207,28 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
201 spin_unlock(&f->tlbstate_lock); 207 spin_unlock(&f->tlbstate_lock);
202} 208}
203 209
210void native_flush_tlb_others(const struct cpumask *cpumask,
211 struct mm_struct *mm, unsigned long va)
212{
213 if (is_uv_system()) {
214 unsigned int cpu;
215
216 cpu = get_cpu();
217 cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
218 if (cpumask)
219 flush_tlb_others_ipi(cpumask, mm, va);
220 put_cpu();
221 return;
222 }
223 flush_tlb_others_ipi(cpumask, mm, va);
224}
225
204static int __cpuinit init_smp_flush(void) 226static int __cpuinit init_smp_flush(void)
205{ 227{
206 int i; 228 int i;
207 229
208 for_each_possible_cpu(i) 230 for (i = 0; i < ARRAY_SIZE(flush_state); i++)
209 spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock); 231 spin_lock_init(&flush_state[i].tlbstate_lock);
210 232
211 return 0; 233 return 0;
212} 234}
@@ -215,25 +237,18 @@ core_initcall(init_smp_flush);
215void flush_tlb_current_task(void) 237void flush_tlb_current_task(void)
216{ 238{
217 struct mm_struct *mm = current->mm; 239 struct mm_struct *mm = current->mm;
218 cpumask_t cpu_mask;
219 240
220 preempt_disable(); 241 preempt_disable();
221 cpu_mask = mm->cpu_vm_mask;
222 cpu_clear(smp_processor_id(), cpu_mask);
223 242
224 local_flush_tlb(); 243 local_flush_tlb();
225 if (!cpus_empty(cpu_mask)) 244 if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
226 flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); 245 flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
227 preempt_enable(); 246 preempt_enable();
228} 247}
229 248
230void flush_tlb_mm(struct mm_struct *mm) 249void flush_tlb_mm(struct mm_struct *mm)
231{ 250{
232 cpumask_t cpu_mask;
233
234 preempt_disable(); 251 preempt_disable();
235 cpu_mask = mm->cpu_vm_mask;
236 cpu_clear(smp_processor_id(), cpu_mask);
237 252
238 if (current->active_mm == mm) { 253 if (current->active_mm == mm) {
239 if (current->mm) 254 if (current->mm)
@@ -241,8 +256,8 @@ void flush_tlb_mm(struct mm_struct *mm)
241 else 256 else
242 leave_mm(smp_processor_id()); 257 leave_mm(smp_processor_id());
243 } 258 }
244 if (!cpus_empty(cpu_mask)) 259 if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
245 flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); 260 flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
246 261
247 preempt_enable(); 262 preempt_enable();
248} 263}
@@ -250,11 +265,8 @@ void flush_tlb_mm(struct mm_struct *mm)
250void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) 265void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
251{ 266{
252 struct mm_struct *mm = vma->vm_mm; 267 struct mm_struct *mm = vma->vm_mm;
253 cpumask_t cpu_mask;
254 268
255 preempt_disable(); 269 preempt_disable();
256 cpu_mask = mm->cpu_vm_mask;
257 cpu_clear(smp_processor_id(), cpu_mask);
258 270
259 if (current->active_mm == mm) { 271 if (current->active_mm == mm) {
260 if (current->mm) 272 if (current->mm)
@@ -263,8 +275,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
263 leave_mm(smp_processor_id()); 275 leave_mm(smp_processor_id());
264 } 276 }
265 277
266 if (!cpus_empty(cpu_mask)) 278 if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
267 flush_tlb_others(cpu_mask, mm, va); 279 flush_tlb_others(&mm->cpu_vm_mask, mm, va);
268 280
269 preempt_enable(); 281 preempt_enable();
270} 282}
@@ -274,7 +286,7 @@ static void do_flush_tlb_all(void *info)
274 unsigned long cpu = smp_processor_id(); 286 unsigned long cpu = smp_processor_id();
275 287
276 __flush_tlb_all(); 288 __flush_tlb_all();
277 if (read_pda(mmu_state) == TLBSTATE_LAZY) 289 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
278 leave_mm(cpu); 290 leave_mm(cpu);
279} 291}
280 292
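
The flush path keeps one smp_flush_state slot per invalidate vector: the sender picks a slot by hashing its CPU number over NUM_INVALIDATE_TLB_VECTORS and sends the IPI on the matching vector, and the handler recovers the slot from the vector it arrived on (the real handler reads it out of ~regs->orig_ax). A stand-alone illustration of the two mappings; the vector base value here is made up:

#include <stdio.h>

#define NUM_INVALIDATE_TLB_VECTORS	8
#define INVALIDATE_TLB_VECTOR_START	0xf0	/* placeholder, not the real vector base */

/* Sender side: which flush_state[] slot (and therefore which vector) to use. */
static unsigned pick_slot(unsigned sending_cpu)
{
	return sending_cpu % NUM_INVALIDATE_TLB_VECTORS;
}

/* Receiver side: recover the slot from the vector the IPI arrived on. */
static unsigned slot_from_vector(unsigned vector)
{
	return vector - INVALIDATE_TLB_VECTOR_START;
}

int main(void)
{
	unsigned cpu = 11;
	unsigned slot = pick_slot(cpu);
	unsigned vector = INVALIDATE_TLB_VECTOR_START + slot;

	printf("cpu %u -> slot %u -> vector %#x -> slot %u\n",
	       cpu, slot, vector, slot_from_vector(vector));
	return 0;
}
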
diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
index 2089354968a2..5601e829c387 100644
--- a/arch/x86/pci/numaq_32.c
+++ b/arch/x86/pci/numaq_32.c
@@ -5,7 +5,7 @@
5#include <linux/pci.h> 5#include <linux/pci.h>
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/nodemask.h> 7#include <linux/nodemask.h>
8#include <mach_apic.h> 8#include <asm/genapic.h>
9#include <asm/mpspec.h> 9#include <asm/mpspec.h>
10#include <asm/pci_x86.h> 10#include <asm/pci_x86.h>
11 11
@@ -18,10 +18,6 @@
18 18
19#define QUADLOCAL2BUS(quad,local) (quad_local_to_mp_bus_id[quad][local]) 19#define QUADLOCAL2BUS(quad,local) (quad_local_to_mp_bus_id[quad][local])
20 20
21/* Where the IO area was mapped on multiquad, always 0 otherwise */
22void *xquad_portio;
23EXPORT_SYMBOL(xquad_portio);
24
25#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) 21#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
26 22
27#define PCI_CONF1_MQ_ADDRESS(bus, devfn, reg) \ 23#define PCI_CONF1_MQ_ADDRESS(bus, devfn, reg) \
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index b82cae970dfd..1c975cc9839e 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -7,7 +7,7 @@
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/uaccess.h> 8#include <linux/uaccess.h>
9#include <asm/pci_x86.h> 9#include <asm/pci_x86.h>
10#include <asm/mach-default/pci-functions.h> 10#include <asm/pci-functions.h>
11 11
12/* BIOS32 signature: "_32_" */ 12/* BIOS32 signature: "_32_" */
13#define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24)) 13#define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 6dcefba7836f..3b767d03fd6a 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -6,7 +6,8 @@ CFLAGS_REMOVE_irq.o = -pg
6endif 6endif
7 7
8obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ 8obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
9 time.o xen-asm_$(BITS).o grant-table.o suspend.o 9 time.o xen-asm.o xen-asm_$(BITS).o \
10 grant-table.o suspend.o
10 11
11obj-$(CONFIG_SMP) += smp.o spinlock.o 12obj-$(CONFIG_SMP) += smp.o spinlock.o
12obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o \ No newline at end of file 13obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o \ No newline at end of file
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index bea215230b20..37230342c2c4 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -61,40 +61,13 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
61enum xen_domain_type xen_domain_type = XEN_NATIVE; 61enum xen_domain_type xen_domain_type = XEN_NATIVE;
62EXPORT_SYMBOL_GPL(xen_domain_type); 62EXPORT_SYMBOL_GPL(xen_domain_type);
63 63
64/*
65 * Identity map, in addition to plain kernel map. This needs to be
66 * large enough to allocate page table pages to allocate the rest.
67 * Each page can map 2MB.
68 */
69static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
70
71#ifdef CONFIG_X86_64
72/* l3 pud for userspace vsyscall mapping */
73static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
74#endif /* CONFIG_X86_64 */
75
76/*
77 * Note about cr3 (pagetable base) values:
78 *
79 * xen_cr3 contains the current logical cr3 value; it contains the
80 * last set cr3. This may not be the current effective cr3, because
81 * its update may be being lazily deferred. However, a vcpu looking
82 * at its own cr3 can use this value knowing that everything will
83 * be self-consistent.
84 *
85 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
86 * hypercall to set the vcpu cr3 is complete (so it may be a little
87 * out of date, but it will never be set early). If one vcpu is
88 * looking at another vcpu's cr3 value, it should use this variable.
89 */
90DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
91DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
92
93struct start_info *xen_start_info; 64struct start_info *xen_start_info;
94EXPORT_SYMBOL_GPL(xen_start_info); 65EXPORT_SYMBOL_GPL(xen_start_info);
95 66
96struct shared_info xen_dummy_shared_info; 67struct shared_info xen_dummy_shared_info;
97 68
69void *xen_initial_gdt;
70
98/* 71/*
99 * Point at some empty memory to start with. We map the real shared_info 72 * Point at some empty memory to start with. We map the real shared_info
100 * page as soon as fixmap is up and running. 73 * page as soon as fixmap is up and running.
@@ -114,14 +87,7 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
114 * 87 *
115 * 0: not available, 1: available 88 * 0: not available, 1: available
116 */ 89 */
117static int have_vcpu_info_placement = 90static int have_vcpu_info_placement = 1;
118#ifdef CONFIG_X86_32
119 1
120#else
121 0
122#endif
123 ;
124
125 91
126static void xen_vcpu_setup(int cpu) 92static void xen_vcpu_setup(int cpu)
127{ 93{
@@ -237,7 +203,7 @@ static unsigned long xen_get_debugreg(int reg)
237 return HYPERVISOR_get_debugreg(reg); 203 return HYPERVISOR_get_debugreg(reg);
238} 204}
239 205
240static void xen_leave_lazy(void) 206void xen_leave_lazy(void)
241{ 207{
242 paravirt_leave_lazy(paravirt_get_lazy_mode()); 208 paravirt_leave_lazy(paravirt_get_lazy_mode());
243 xen_mc_flush(); 209 xen_mc_flush();
@@ -598,83 +564,6 @@ static struct apic_ops xen_basic_apic_ops = {
598 564
599#endif 565#endif
600 566
601static void xen_flush_tlb(void)
602{
603 struct mmuext_op *op;
604 struct multicall_space mcs;
605
606 preempt_disable();
607
608 mcs = xen_mc_entry(sizeof(*op));
609
610 op = mcs.args;
611 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
612 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
613
614 xen_mc_issue(PARAVIRT_LAZY_MMU);
615
616 preempt_enable();
617}
618
619static void xen_flush_tlb_single(unsigned long addr)
620{
621 struct mmuext_op *op;
622 struct multicall_space mcs;
623
624 preempt_disable();
625
626 mcs = xen_mc_entry(sizeof(*op));
627 op = mcs.args;
628 op->cmd = MMUEXT_INVLPG_LOCAL;
629 op->arg1.linear_addr = addr & PAGE_MASK;
630 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
631
632 xen_mc_issue(PARAVIRT_LAZY_MMU);
633
634 preempt_enable();
635}
636
637static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
638 unsigned long va)
639{
640 struct {
641 struct mmuext_op op;
642 cpumask_t mask;
643 } *args;
644 cpumask_t cpumask = *cpus;
645 struct multicall_space mcs;
646
647 /*
648 * A couple of (to be removed) sanity checks:
649 *
650 * - current CPU must not be in mask
651 * - mask must exist :)
652 */
653 BUG_ON(cpus_empty(cpumask));
654 BUG_ON(cpu_isset(smp_processor_id(), cpumask));
655 BUG_ON(!mm);
656
657 /* If a CPU which we ran on has gone down, OK. */
658 cpus_and(cpumask, cpumask, cpu_online_map);
659 if (cpus_empty(cpumask))
660 return;
661
662 mcs = xen_mc_entry(sizeof(*args));
663 args = mcs.args;
664 args->mask = cpumask;
665 args->op.arg2.vcpumask = &args->mask;
666
667 if (va == TLB_FLUSH_ALL) {
668 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
669 } else {
670 args->op.cmd = MMUEXT_INVLPG_MULTI;
671 args->op.arg1.linear_addr = va;
672 }
673
674 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
675
676 xen_mc_issue(PARAVIRT_LAZY_MMU);
677}
678 567
679static void xen_clts(void) 568static void xen_clts(void)
680{ 569{
@@ -700,21 +589,6 @@ static void xen_write_cr0(unsigned long cr0)
700 xen_mc_issue(PARAVIRT_LAZY_CPU); 589 xen_mc_issue(PARAVIRT_LAZY_CPU);
701} 590}
702 591
703static void xen_write_cr2(unsigned long cr2)
704{
705 x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
706}
707
708static unsigned long xen_read_cr2(void)
709{
710 return x86_read_percpu(xen_vcpu)->arch.cr2;
711}
712
713static unsigned long xen_read_cr2_direct(void)
714{
715 return x86_read_percpu(xen_vcpu_info.arch.cr2);
716}
717
718static void xen_write_cr4(unsigned long cr4) 592static void xen_write_cr4(unsigned long cr4)
719{ 593{
720 cr4 &= ~X86_CR4_PGE; 594 cr4 &= ~X86_CR4_PGE;
@@ -723,71 +597,6 @@ static void xen_write_cr4(unsigned long cr4)
723 native_write_cr4(cr4); 597 native_write_cr4(cr4);
724} 598}
725 599
726static unsigned long xen_read_cr3(void)
727{
728 return x86_read_percpu(xen_cr3);
729}
730
731static void set_current_cr3(void *v)
732{
733 x86_write_percpu(xen_current_cr3, (unsigned long)v);
734}
735
736static void __xen_write_cr3(bool kernel, unsigned long cr3)
737{
738 struct mmuext_op *op;
739 struct multicall_space mcs;
740 unsigned long mfn;
741
742 if (cr3)
743 mfn = pfn_to_mfn(PFN_DOWN(cr3));
744 else
745 mfn = 0;
746
747 WARN_ON(mfn == 0 && kernel);
748
749 mcs = __xen_mc_entry(sizeof(*op));
750
751 op = mcs.args;
752 op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
753 op->arg1.mfn = mfn;
754
755 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
756
757 if (kernel) {
758 x86_write_percpu(xen_cr3, cr3);
759
760 /* Update xen_current_cr3 once the batch has actually
761 been submitted. */
762 xen_mc_callback(set_current_cr3, (void *)cr3);
763 }
764}
765
766static void xen_write_cr3(unsigned long cr3)
767{
768 BUG_ON(preemptible());
769
770 xen_mc_batch(); /* disables interrupts */
771
 772	/* Update while interrupts are disabled, so it's atomic with
 773	   respect to IPIs */
774 x86_write_percpu(xen_cr3, cr3);
775
776 __xen_write_cr3(true, cr3);
777
778#ifdef CONFIG_X86_64
779 {
780 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
781 if (user_pgd)
782 __xen_write_cr3(false, __pa(user_pgd));
783 else
784 __xen_write_cr3(false, 0);
785 }
786#endif
787
788 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
789}
790
791static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) 600static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
792{ 601{
793 int ret; 602 int ret;
@@ -829,185 +638,6 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
829 return ret; 638 return ret;
830} 639}
831 640
832/* Early in boot, while setting up the initial pagetable, assume
833 everything is pinned. */
834static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
835{
836#ifdef CONFIG_FLATMEM
837 BUG_ON(mem_map); /* should only be used early */
838#endif
839 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
840}
841
842/* Early release_pte assumes that all pts are pinned, since there's
843 only init_mm and anything attached to that is pinned. */
844static void xen_release_pte_init(unsigned long pfn)
845{
846 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
847}
848
849static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
850{
851 struct mmuext_op op;
852 op.cmd = cmd;
853 op.arg1.mfn = pfn_to_mfn(pfn);
854 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
855 BUG();
856}
857
 858/* This needs to make sure the new pte page is pinned iff it's being
859 attached to a pinned pagetable. */
860static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
861{
862 struct page *page = pfn_to_page(pfn);
863
864 if (PagePinned(virt_to_page(mm->pgd))) {
865 SetPagePinned(page);
866
867 vm_unmap_aliases();
868 if (!PageHighMem(page)) {
869 make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
870 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
871 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
872 } else {
873 /* make sure there are no stray mappings of
874 this page */
875 kmap_flush_unused();
876 }
877 }
878}
879
880static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
881{
882 xen_alloc_ptpage(mm, pfn, PT_PTE);
883}
884
885static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
886{
887 xen_alloc_ptpage(mm, pfn, PT_PMD);
888}
889
890static int xen_pgd_alloc(struct mm_struct *mm)
891{
892 pgd_t *pgd = mm->pgd;
893 int ret = 0;
894
895 BUG_ON(PagePinned(virt_to_page(pgd)));
896
897#ifdef CONFIG_X86_64
898 {
899 struct page *page = virt_to_page(pgd);
900 pgd_t *user_pgd;
901
902 BUG_ON(page->private != 0);
903
904 ret = -ENOMEM;
905
906 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
907 page->private = (unsigned long)user_pgd;
908
909 if (user_pgd != NULL) {
910 user_pgd[pgd_index(VSYSCALL_START)] =
911 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
912 ret = 0;
913 }
914
915 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
916 }
917#endif
918
919 return ret;
920}
921
922static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
923{
924#ifdef CONFIG_X86_64
925 pgd_t *user_pgd = xen_get_user_pgd(pgd);
926
927 if (user_pgd)
928 free_page((unsigned long)user_pgd);
929#endif
930}
931
932/* This should never happen until we're OK to use struct page */
933static void xen_release_ptpage(unsigned long pfn, unsigned level)
934{
935 struct page *page = pfn_to_page(pfn);
936
937 if (PagePinned(page)) {
938 if (!PageHighMem(page)) {
939 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
940 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
941 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
942 }
943 ClearPagePinned(page);
944 }
945}
946
947static void xen_release_pte(unsigned long pfn)
948{
949 xen_release_ptpage(pfn, PT_PTE);
950}
951
952static void xen_release_pmd(unsigned long pfn)
953{
954 xen_release_ptpage(pfn, PT_PMD);
955}
956
957#if PAGETABLE_LEVELS == 4
958static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
959{
960 xen_alloc_ptpage(mm, pfn, PT_PUD);
961}
962
963static void xen_release_pud(unsigned long pfn)
964{
965 xen_release_ptpage(pfn, PT_PUD);
966}
967#endif
968
969#ifdef CONFIG_HIGHPTE
970static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
971{
972 pgprot_t prot = PAGE_KERNEL;
973
974 if (PagePinned(page))
975 prot = PAGE_KERNEL_RO;
976
977 if (0 && PageHighMem(page))
978 printk("mapping highpte %lx type %d prot %s\n",
979 page_to_pfn(page), type,
980 (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
981
982 return kmap_atomic_prot(page, type, prot);
983}
984#endif
985
986#ifdef CONFIG_X86_32
987static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
988{
989 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
990 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
991 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
992 pte_val_ma(pte));
993
994 return pte;
995}
996
997/* Init-time set_pte while constructing initial pagetables, which
998 doesn't allow RO pagetable pages to be remapped RW */
999static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
1000{
1001 pte = mask_rw_pte(ptep, pte);
1002
1003 xen_set_pte(ptep, pte);
1004}
1005#endif
1006
1007static __init void xen_pagetable_setup_start(pgd_t *base)
1008{
1009}
1010
1011void xen_setup_shared_info(void) 641void xen_setup_shared_info(void)
1012{ 642{
1013 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 643 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
@@ -1028,37 +658,6 @@ void xen_setup_shared_info(void)
1028 xen_setup_mfn_list_list(); 658 xen_setup_mfn_list_list();
1029} 659}
1030 660
1031static __init void xen_pagetable_setup_done(pgd_t *base)
1032{
1033 xen_setup_shared_info();
1034}
1035
1036static __init void xen_post_allocator_init(void)
1037{
1038 pv_mmu_ops.set_pte = xen_set_pte;
1039 pv_mmu_ops.set_pmd = xen_set_pmd;
1040 pv_mmu_ops.set_pud = xen_set_pud;
1041#if PAGETABLE_LEVELS == 4
1042 pv_mmu_ops.set_pgd = xen_set_pgd;
1043#endif
1044
1045 /* This will work as long as patching hasn't happened yet
1046 (which it hasn't) */
1047 pv_mmu_ops.alloc_pte = xen_alloc_pte;
1048 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
1049 pv_mmu_ops.release_pte = xen_release_pte;
1050 pv_mmu_ops.release_pmd = xen_release_pmd;
1051#if PAGETABLE_LEVELS == 4
1052 pv_mmu_ops.alloc_pud = xen_alloc_pud;
1053 pv_mmu_ops.release_pud = xen_release_pud;
1054#endif
1055
1056#ifdef CONFIG_X86_64
1057 SetPagePinned(virt_to_page(level3_user_vsyscall));
1058#endif
1059 xen_mark_init_mm_pinned();
1060}
1061
1062/* This is called once we have the cpu_possible_map */ 661/* This is called once we have the cpu_possible_map */
1063void xen_setup_vcpu_info_placement(void) 662void xen_setup_vcpu_info_placement(void)
1064{ 663{
@@ -1072,10 +671,10 @@ void xen_setup_vcpu_info_placement(void)
1072 if (have_vcpu_info_placement) { 671 if (have_vcpu_info_placement) {
1073 printk(KERN_INFO "Xen: using vcpu_info placement\n"); 672 printk(KERN_INFO "Xen: using vcpu_info placement\n");
1074 673
1075 pv_irq_ops.save_fl = xen_save_fl_direct; 674 pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
1076 pv_irq_ops.restore_fl = xen_restore_fl_direct; 675 pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
1077 pv_irq_ops.irq_disable = xen_irq_disable_direct; 676 pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
1078 pv_irq_ops.irq_enable = xen_irq_enable_direct; 677 pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
1079 pv_mmu_ops.read_cr2 = xen_read_cr2_direct; 678 pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
1080 } 679 }
1081} 680}
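
	The hunk above only repoints pv_irq_ops at the callee-save "direct" variants once
	vcpu_info placement is known to be usable. As a rough, stand-alone illustration of
	that pattern (not kernel code; every name below - irq_ops, *_generic, *_direct,
	setup_vcpu_info_placement - is invented for the sketch), an op table can start out
	pointing at conservative implementations and be swapped to cheaper ones when a
	feature check passes:

	#include <stdio.h>
	#include <stdbool.h>

	struct irq_ops {
		unsigned long (*save_fl)(void);
		void (*irq_disable)(void);
	};

	static unsigned long save_fl_generic(void) { puts("generic save_fl"); return 0; }
	static unsigned long save_fl_direct(void)  { puts("direct save_fl");  return 0; }
	static void irq_disable_generic(void) { puts("generic irq_disable"); }
	static void irq_disable_direct(void)  { puts("direct irq_disable"); }

	/* start with the always-safe implementations */
	static struct irq_ops ops = { save_fl_generic, irq_disable_generic };

	static void setup_vcpu_info_placement(bool have_placement)
	{
		if (have_placement) {		/* cheaper variants are now safe to use */
			ops.save_fl = save_fl_direct;
			ops.irq_disable = irq_disable_direct;
		}
	}

	int main(void)
	{
		ops.save_fl();			/* generic */
		ops.irq_disable();		/* generic */
		setup_vcpu_info_placement(true);
		ops.save_fl();			/* direct */
		ops.irq_disable();		/* direct */
		return 0;
	}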
@@ -1133,49 +732,6 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
1133 return ret; 732 return ret;
1134} 733}
1135 734
1136static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
1137{
1138 pte_t pte;
1139
1140 phys >>= PAGE_SHIFT;
1141
1142 switch (idx) {
1143 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
1144#ifdef CONFIG_X86_F00F_BUG
1145 case FIX_F00F_IDT:
1146#endif
1147#ifdef CONFIG_X86_32
1148 case FIX_WP_TEST:
1149 case FIX_VDSO:
1150# ifdef CONFIG_HIGHMEM
1151 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
1152# endif
1153#else
1154 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
1155#endif
1156#ifdef CONFIG_X86_LOCAL_APIC
1157 case FIX_APIC_BASE: /* maps dummy local APIC */
1158#endif
1159 pte = pfn_pte(phys, prot);
1160 break;
1161
1162 default:
1163 pte = mfn_pte(phys, prot);
1164 break;
1165 }
1166
1167 __native_set_fixmap(idx, pte);
1168
1169#ifdef CONFIG_X86_64
1170 /* Replicate changes to map the vsyscall page into the user
1171 pagetable vsyscall mapping. */
1172 if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
1173 unsigned long vaddr = __fix_to_virt(idx);
1174 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
1175 }
1176#endif
1177}
1178
1179static const struct pv_info xen_info __initdata = { 735static const struct pv_info xen_info __initdata = {
1180 .paravirt_enabled = 1, 736 .paravirt_enabled = 1,
1181 .shared_kernel_pmd = 0, 737 .shared_kernel_pmd = 0,
@@ -1271,87 +827,6 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
1271#endif 827#endif
1272}; 828};
1273 829
1274static const struct pv_mmu_ops xen_mmu_ops __initdata = {
1275 .pagetable_setup_start = xen_pagetable_setup_start,
1276 .pagetable_setup_done = xen_pagetable_setup_done,
1277
1278 .read_cr2 = xen_read_cr2,
1279 .write_cr2 = xen_write_cr2,
1280
1281 .read_cr3 = xen_read_cr3,
1282 .write_cr3 = xen_write_cr3,
1283
1284 .flush_tlb_user = xen_flush_tlb,
1285 .flush_tlb_kernel = xen_flush_tlb,
1286 .flush_tlb_single = xen_flush_tlb_single,
1287 .flush_tlb_others = xen_flush_tlb_others,
1288
1289 .pte_update = paravirt_nop,
1290 .pte_update_defer = paravirt_nop,
1291
1292 .pgd_alloc = xen_pgd_alloc,
1293 .pgd_free = xen_pgd_free,
1294
1295 .alloc_pte = xen_alloc_pte_init,
1296 .release_pte = xen_release_pte_init,
1297 .alloc_pmd = xen_alloc_pte_init,
1298 .alloc_pmd_clone = paravirt_nop,
1299 .release_pmd = xen_release_pte_init,
1300
1301#ifdef CONFIG_HIGHPTE
1302 .kmap_atomic_pte = xen_kmap_atomic_pte,
1303#endif
1304
1305#ifdef CONFIG_X86_64
1306 .set_pte = xen_set_pte,
1307#else
1308 .set_pte = xen_set_pte_init,
1309#endif
1310 .set_pte_at = xen_set_pte_at,
1311 .set_pmd = xen_set_pmd_hyper,
1312
1313 .ptep_modify_prot_start = __ptep_modify_prot_start,
1314 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
1315
1316 .pte_val = xen_pte_val,
1317 .pte_flags = native_pte_flags,
1318 .pgd_val = xen_pgd_val,
1319
1320 .make_pte = xen_make_pte,
1321 .make_pgd = xen_make_pgd,
1322
1323#ifdef CONFIG_X86_PAE
1324 .set_pte_atomic = xen_set_pte_atomic,
1325 .set_pte_present = xen_set_pte_at,
1326 .pte_clear = xen_pte_clear,
1327 .pmd_clear = xen_pmd_clear,
1328#endif /* CONFIG_X86_PAE */
1329 .set_pud = xen_set_pud_hyper,
1330
1331 .make_pmd = xen_make_pmd,
1332 .pmd_val = xen_pmd_val,
1333
1334#if PAGETABLE_LEVELS == 4
1335 .pud_val = xen_pud_val,
1336 .make_pud = xen_make_pud,
1337 .set_pgd = xen_set_pgd_hyper,
1338
1339 .alloc_pud = xen_alloc_pte_init,
1340 .release_pud = xen_release_pte_init,
1341#endif /* PAGETABLE_LEVELS == 4 */
1342
1343 .activate_mm = xen_activate_mm,
1344 .dup_mmap = xen_dup_mmap,
1345 .exit_mmap = xen_exit_mmap,
1346
1347 .lazy_mode = {
1348 .enter = paravirt_enter_lazy_mmu,
1349 .leave = xen_leave_lazy,
1350 },
1351
1352 .set_fixmap = xen_set_fixmap,
1353};
1354
1355static void xen_reboot(int reason) 830static void xen_reboot(int reason)
1356{ 831{
1357 struct sched_shutdown r = { .reason = reason }; 832 struct sched_shutdown r = { .reason = reason };
@@ -1394,223 +869,6 @@ static const struct machine_ops __initdata xen_machine_ops = {
1394}; 869};
1395 870
1396 871
1397static void __init xen_reserve_top(void)
1398{
1399#ifdef CONFIG_X86_32
1400 unsigned long top = HYPERVISOR_VIRT_START;
1401 struct xen_platform_parameters pp;
1402
1403 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1404 top = pp.virt_start;
1405
1406 reserve_top_address(-top);
1407#endif /* CONFIG_X86_32 */
1408}
1409
1410/*
1411 * Like __va(), but returns the address in the kernel mapping (which is
1412 * all we have until the physical memory mapping has been set up).
1413 */
1414static void *__ka(phys_addr_t paddr)
1415{
1416#ifdef CONFIG_X86_64
1417 return (void *)(paddr + __START_KERNEL_map);
1418#else
1419 return __va(paddr);
1420#endif
1421}
1422
1423/* Convert a machine address to physical address */
1424static unsigned long m2p(phys_addr_t maddr)
1425{
1426 phys_addr_t paddr;
1427
1428 maddr &= PTE_PFN_MASK;
1429 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1430
1431 return paddr;
1432}
1433
1434/* Convert a machine address to kernel virtual */
1435static void *m2v(phys_addr_t maddr)
1436{
1437 return __ka(m2p(maddr));
1438}
1439
1440static void set_page_prot(void *addr, pgprot_t prot)
1441{
1442 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1443 pte_t pte = pfn_pte(pfn, prot);
1444
1445 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1446 BUG();
1447}
1448
1449static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1450{
1451 unsigned pmdidx, pteidx;
1452 unsigned ident_pte;
1453 unsigned long pfn;
1454
1455 ident_pte = 0;
1456 pfn = 0;
1457 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1458 pte_t *pte_page;
1459
1460 /* Reuse or allocate a page of ptes */
1461 if (pmd_present(pmd[pmdidx]))
1462 pte_page = m2v(pmd[pmdidx].pmd);
1463 else {
1464 /* Check for free pte pages */
1465 if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
1466 break;
1467
1468 pte_page = &level1_ident_pgt[ident_pte];
1469 ident_pte += PTRS_PER_PTE;
1470
1471 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1472 }
1473
1474 /* Install mappings */
1475 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1476 pte_t pte;
1477
1478 if (pfn > max_pfn_mapped)
1479 max_pfn_mapped = pfn;
1480
1481 if (!pte_none(pte_page[pteidx]))
1482 continue;
1483
1484 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1485 pte_page[pteidx] = pte;
1486 }
1487 }
1488
1489 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1490 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1491
1492 set_page_prot(pmd, PAGE_KERNEL_RO);
1493}
1494
1495#ifdef CONFIG_X86_64
1496static void convert_pfn_mfn(void *v)
1497{
1498 pte_t *pte = v;
1499 int i;
1500
1501 /* All levels are converted the same way, so just treat them
1502 as ptes. */
1503 for (i = 0; i < PTRS_PER_PTE; i++)
1504 pte[i] = xen_make_pte(pte[i].pte);
1505}
1506
1507/*
1508 * Set up the initial kernel pagetable.
1509 *
1510 * We can construct this by grafting the Xen provided pagetable into
1511 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1512 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1513 * means that only the kernel has a physical mapping to start with -
1514 * but that's enough to get __va working. We need to fill in the rest
1515 * of the physical mapping once some sort of allocator has been set
1516 * up.
1517 */
1518static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1519 unsigned long max_pfn)
1520{
1521 pud_t *l3;
1522 pmd_t *l2;
1523
1524 /* Zap identity mapping */
1525 init_level4_pgt[0] = __pgd(0);
1526
1527 /* Pre-constructed entries are in pfn, so convert to mfn */
1528 convert_pfn_mfn(init_level4_pgt);
1529 convert_pfn_mfn(level3_ident_pgt);
1530 convert_pfn_mfn(level3_kernel_pgt);
1531
1532 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1533 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1534
1535 memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1536 memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1537
1538 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1539 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1540 memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1541
1542 /* Set up identity map */
1543 xen_map_identity_early(level2_ident_pgt, max_pfn);
1544
1545 /* Make pagetable pieces RO */
1546 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1547 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1548 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1549 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1550 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1551 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1552
1553 /* Pin down new L4 */
1554 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1555 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1556
1557 /* Unpin Xen-provided one */
1558 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1559
1560 /* Switch over */
1561 pgd = init_level4_pgt;
1562
1563 /*
1564 * At this stage there can be no user pgd, and no page
1565 * structure to attach it to, so make sure we just set kernel
1566 * pgd.
1567 */
1568 xen_mc_batch();
1569 __xen_write_cr3(true, __pa(pgd));
1570 xen_mc_issue(PARAVIRT_LAZY_CPU);
1571
1572 reserve_early(__pa(xen_start_info->pt_base),
1573 __pa(xen_start_info->pt_base +
1574 xen_start_info->nr_pt_frames * PAGE_SIZE),
1575 "XEN PAGETABLES");
1576
1577 return pgd;
1578}
1579#else /* !CONFIG_X86_64 */
1580static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
1581
1582static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1583 unsigned long max_pfn)
1584{
1585 pmd_t *kernel_pmd;
1586
1587 init_pg_tables_start = __pa(pgd);
1588 init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
1589 max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);
1590
1591 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
1592 memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
1593
1594 xen_map_identity_early(level2_kernel_pgt, max_pfn);
1595
1596 memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
1597 set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
1598 __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
1599
1600 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1601 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1602 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1603
1604 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1605
1606 xen_write_cr3(__pa(swapper_pg_dir));
1607
1608 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
1609
1610 return swapper_pg_dir;
1611}
1612#endif /* CONFIG_X86_64 */
1613
1614/* First C function to be called on Xen boot */ 872/* First C function to be called on Xen boot */
1615asmlinkage void __init xen_start_kernel(void) 873asmlinkage void __init xen_start_kernel(void)
1616{ 874{
@@ -1650,10 +908,18 @@ asmlinkage void __init xen_start_kernel(void)
1650 machine_ops = xen_machine_ops; 908 machine_ops = xen_machine_ops;
1651 909
1652#ifdef CONFIG_X86_64 910#ifdef CONFIG_X86_64
1653 /* Disable until direct per-cpu data access. */ 911 /*
1654 have_vcpu_info_placement = 0; 912 * Setup percpu state. We only need to do this for 64-bit
1655 x86_64_init_pda(); 913 * because 32-bit already has %fs set properly.
914 */
915 load_percpu_segment(0);
1656#endif 916#endif
917 /*
918 * The only reliable way to retain the initial address of the
919 * percpu gdt_page is to remember it here, so we can go and
920 * mark it RW later, when the initial percpu area is freed.
921 */
922 xen_initial_gdt = &per_cpu(gdt_page, 0);
1657 923
1658 xen_smp_init(); 924 xen_smp_init();
1659 925
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index bb042608c602..cfd17799bd6d 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -19,27 +19,12 @@ void xen_force_evtchn_callback(void)
19 (void)HYPERVISOR_xen_version(0, NULL); 19 (void)HYPERVISOR_xen_version(0, NULL);
20} 20}
21 21
22static void __init __xen_init_IRQ(void)
23{
24 int i;
25
26 /* Create identity vector->irq map */
27 for(i = 0; i < NR_VECTORS; i++) {
28 int cpu;
29
30 for_each_possible_cpu(cpu)
31 per_cpu(vector_irq, cpu)[i] = i;
32 }
33
34 xen_init_IRQ();
35}
36
37static unsigned long xen_save_fl(void) 22static unsigned long xen_save_fl(void)
38{ 23{
39 struct vcpu_info *vcpu; 24 struct vcpu_info *vcpu;
40 unsigned long flags; 25 unsigned long flags;
41 26
42 vcpu = x86_read_percpu(xen_vcpu); 27 vcpu = percpu_read(xen_vcpu);
43 28
44 /* flag has opposite sense of mask */ 29 /* flag has opposite sense of mask */
45 flags = !vcpu->evtchn_upcall_mask; 30 flags = !vcpu->evtchn_upcall_mask;
@@ -50,6 +35,7 @@ static unsigned long xen_save_fl(void)
50 */ 35 */
51 return (-flags) & X86_EFLAGS_IF; 36 return (-flags) & X86_EFLAGS_IF;
52} 37}
38PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
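
	For reference, here is a small stand-alone sketch of the arithmetic xen_save_fl()
	uses above. The 0x200 value of X86_EFLAGS_IF is the only real constant; the helper
	name and everything else is illustration only:

	#include <stdio.h>

	#define X86_EFLAGS_IF 0x200UL

	static unsigned long save_fl_from_mask(unsigned char upcall_mask)
	{
		unsigned long flags = !upcall_mask;	/* 1 = enabled, 0 = masked */
		/* -1UL has all bits set, -0UL is 0, so masking with IF yields
		   exactly X86_EFLAGS_IF or 0 */
		return (-flags) & X86_EFLAGS_IF;
	}

	int main(void)
	{
		printf("mask=0 -> flags=%#lx\n", save_fl_from_mask(0));	/* 0x200 */
		printf("mask=1 -> flags=%#lx\n", save_fl_from_mask(1));	/* 0 */
		return 0;
	}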
53 39
54static void xen_restore_fl(unsigned long flags) 40static void xen_restore_fl(unsigned long flags)
55{ 41{
@@ -62,7 +48,7 @@ static void xen_restore_fl(unsigned long flags)
 62	   make sure we don't switch CPUs between getting the vcpu 48	   make sure we don't switch CPUs between getting the vcpu
63 pointer and updating the mask. */ 49 pointer and updating the mask. */
64 preempt_disable(); 50 preempt_disable();
65 vcpu = x86_read_percpu(xen_vcpu); 51 vcpu = percpu_read(xen_vcpu);
66 vcpu->evtchn_upcall_mask = flags; 52 vcpu->evtchn_upcall_mask = flags;
67 preempt_enable_no_resched(); 53 preempt_enable_no_resched();
68 54
@@ -76,6 +62,7 @@ static void xen_restore_fl(unsigned long flags)
76 xen_force_evtchn_callback(); 62 xen_force_evtchn_callback();
77 } 63 }
78} 64}
65PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
79 66
80static void xen_irq_disable(void) 67static void xen_irq_disable(void)
81{ 68{
@@ -83,9 +70,10 @@ static void xen_irq_disable(void)
 83	   make sure we don't switch CPUs between getting the vcpu 70	   make sure we don't switch CPUs between getting the vcpu
84 pointer and updating the mask. */ 71 pointer and updating the mask. */
85 preempt_disable(); 72 preempt_disable();
86 x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1; 73 percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
87 preempt_enable_no_resched(); 74 preempt_enable_no_resched();
88} 75}
76PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
89 77
90static void xen_irq_enable(void) 78static void xen_irq_enable(void)
91{ 79{
@@ -96,7 +84,7 @@ static void xen_irq_enable(void)
96 the caller is confused and is trying to re-enable interrupts 84 the caller is confused and is trying to re-enable interrupts
97 on an indeterminate processor. */ 85 on an indeterminate processor. */
98 86
99 vcpu = x86_read_percpu(xen_vcpu); 87 vcpu = percpu_read(xen_vcpu);
100 vcpu->evtchn_upcall_mask = 0; 88 vcpu->evtchn_upcall_mask = 0;
101 89
102 /* Doesn't matter if we get preempted here, because any 90 /* Doesn't matter if we get preempted here, because any
@@ -106,6 +94,7 @@ static void xen_irq_enable(void)
106 if (unlikely(vcpu->evtchn_upcall_pending)) 94 if (unlikely(vcpu->evtchn_upcall_pending))
107 xen_force_evtchn_callback(); 95 xen_force_evtchn_callback();
108} 96}
97PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
109 98
110static void xen_safe_halt(void) 99static void xen_safe_halt(void)
111{ 100{
@@ -123,11 +112,13 @@ static void xen_halt(void)
123} 112}
124 113
125static const struct pv_irq_ops xen_irq_ops __initdata = { 114static const struct pv_irq_ops xen_irq_ops __initdata = {
126 .init_IRQ = __xen_init_IRQ, 115 .init_IRQ = xen_init_IRQ,
127 .save_fl = xen_save_fl, 116
128 .restore_fl = xen_restore_fl, 117 .save_fl = PV_CALLEE_SAVE(xen_save_fl),
129 .irq_disable = xen_irq_disable, 118 .restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
130 .irq_enable = xen_irq_enable, 119 .irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
120 .irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
121
131 .safe_halt = xen_safe_halt, 122 .safe_halt = xen_safe_halt,
132 .halt = xen_halt, 123 .halt = xen_halt,
133#ifdef CONFIG_X86_64 124#ifdef CONFIG_X86_64
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 503c240e26c7..d2e8ed1aff3d 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -47,6 +47,7 @@
47#include <asm/tlbflush.h> 47#include <asm/tlbflush.h>
48#include <asm/fixmap.h> 48#include <asm/fixmap.h>
49#include <asm/mmu_context.h> 49#include <asm/mmu_context.h>
50#include <asm/setup.h>
50#include <asm/paravirt.h> 51#include <asm/paravirt.h>
51#include <asm/linkage.h> 52#include <asm/linkage.h>
52 53
@@ -55,6 +56,8 @@
55 56
56#include <xen/page.h> 57#include <xen/page.h>
57#include <xen/interface/xen.h> 58#include <xen/interface/xen.h>
59#include <xen/interface/version.h>
60#include <xen/hvc-console.h>
58 61
59#include "multicalls.h" 62#include "multicalls.h"
60#include "mmu.h" 63#include "mmu.h"
@@ -114,6 +117,37 @@ static inline void check_zero(void)
114 117
115#endif /* CONFIG_XEN_DEBUG_FS */ 118#endif /* CONFIG_XEN_DEBUG_FS */
116 119
120
121/*
 122 * Identity map, in addition to the plain kernel map. This needs to be
 123 * large enough to provide the page table pages used to map the rest.
124 * Each page can map 2MB.
125 */
126static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
127
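
	A quick sanity check of the sizing comment above, as a stand-alone sketch. It
	assumes the 64-bit/PAE value of 512 pte entries per page and 4 KiB pages (32-bit
	non-PAE would be 1024 entries mapping 4 MiB per page); the numbers are assumptions
	for illustration, not taken from the patch:

	#include <stdio.h>

	int main(void)
	{
		unsigned long ptrs_per_pte = 512;	/* 64-bit / PAE assumption */
		unsigned long page_size = 4096;
		unsigned long pte_pages = 4;		/* PTRS_PER_PTE * 4 entries = 4 pte pages */

		unsigned long per_page = ptrs_per_pte * page_size;	/* 2 MiB per pte page */
		unsigned long total = pte_pages * per_page;		/* 8 MiB for the pool */

		printf("one pte page maps %lu MiB, the pool maps %lu MiB\n",
		       per_page >> 20, total >> 20);
		return 0;
	}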
128#ifdef CONFIG_X86_64
129/* l3 pud for userspace vsyscall mapping */
130static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
131#endif /* CONFIG_X86_64 */
132
133/*
134 * Note about cr3 (pagetable base) values:
135 *
 136 * xen_cr3 contains the current logical cr3 value, i.e. the last cr3
 137 * that was set. This may not be the current effective cr3, because
 138 * its update may still be lazily deferred. However, a vcpu looking
 139 * at its own cr3 can use this value knowing that everything will
140 * be self-consistent.
141 *
142 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
143 * hypercall to set the vcpu cr3 is complete (so it may be a little
144 * out of date, but it will never be set early). If one vcpu is
145 * looking at another vcpu's cr3 value, it should use this variable.
146 */
147DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
148DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
149
150
117/* 151/*
118 * Just beyond the highest usermode address. STACK_TOP_MAX has a 152 * Just beyond the highest usermode address. STACK_TOP_MAX has a
119 * redzone above it, so round it up to a PGD boundary. 153 * redzone above it, so round it up to a PGD boundary.
@@ -458,28 +492,33 @@ pteval_t xen_pte_val(pte_t pte)
458{ 492{
459 return pte_mfn_to_pfn(pte.pte); 493 return pte_mfn_to_pfn(pte.pte);
460} 494}
495PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
461 496
462pgdval_t xen_pgd_val(pgd_t pgd) 497pgdval_t xen_pgd_val(pgd_t pgd)
463{ 498{
464 return pte_mfn_to_pfn(pgd.pgd); 499 return pte_mfn_to_pfn(pgd.pgd);
465} 500}
501PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
466 502
467pte_t xen_make_pte(pteval_t pte) 503pte_t xen_make_pte(pteval_t pte)
468{ 504{
469 pte = pte_pfn_to_mfn(pte); 505 pte = pte_pfn_to_mfn(pte);
470 return native_make_pte(pte); 506 return native_make_pte(pte);
471} 507}
508PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
472 509
473pgd_t xen_make_pgd(pgdval_t pgd) 510pgd_t xen_make_pgd(pgdval_t pgd)
474{ 511{
475 pgd = pte_pfn_to_mfn(pgd); 512 pgd = pte_pfn_to_mfn(pgd);
476 return native_make_pgd(pgd); 513 return native_make_pgd(pgd);
477} 514}
515PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
478 516
479pmdval_t xen_pmd_val(pmd_t pmd) 517pmdval_t xen_pmd_val(pmd_t pmd)
480{ 518{
481 return pte_mfn_to_pfn(pmd.pmd); 519 return pte_mfn_to_pfn(pmd.pmd);
482} 520}
521PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
483 522
484void xen_set_pud_hyper(pud_t *ptr, pud_t val) 523void xen_set_pud_hyper(pud_t *ptr, pud_t val)
485{ 524{
@@ -556,12 +595,14 @@ pmd_t xen_make_pmd(pmdval_t pmd)
556 pmd = pte_pfn_to_mfn(pmd); 595 pmd = pte_pfn_to_mfn(pmd);
557 return native_make_pmd(pmd); 596 return native_make_pmd(pmd);
558} 597}
598PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
559 599
560#if PAGETABLE_LEVELS == 4 600#if PAGETABLE_LEVELS == 4
561pudval_t xen_pud_val(pud_t pud) 601pudval_t xen_pud_val(pud_t pud)
562{ 602{
563 return pte_mfn_to_pfn(pud.pud); 603 return pte_mfn_to_pfn(pud.pud);
564} 604}
605PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
565 606
566pud_t xen_make_pud(pudval_t pud) 607pud_t xen_make_pud(pudval_t pud)
567{ 608{
@@ -569,6 +610,7 @@ pud_t xen_make_pud(pudval_t pud)
569 610
570 return native_make_pud(pud); 611 return native_make_pud(pud);
571} 612}
613PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
572 614
573pgd_t *xen_get_user_pgd(pgd_t *pgd) 615pgd_t *xen_get_user_pgd(pgd_t *pgd)
574{ 616{
@@ -1063,18 +1105,14 @@ static void drop_other_mm_ref(void *info)
1063 struct mm_struct *mm = info; 1105 struct mm_struct *mm = info;
1064 struct mm_struct *active_mm; 1106 struct mm_struct *active_mm;
1065 1107
1066#ifdef CONFIG_X86_64 1108 active_mm = percpu_read(cpu_tlbstate.active_mm);
1067 active_mm = read_pda(active_mm);
1068#else
1069 active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
1070#endif
1071 1109
1072 if (active_mm == mm) 1110 if (active_mm == mm)
1073 leave_mm(smp_processor_id()); 1111 leave_mm(smp_processor_id());
1074 1112
1075 /* If this cpu still has a stale cr3 reference, then make sure 1113 /* If this cpu still has a stale cr3 reference, then make sure
1076 it has been flushed. */ 1114 it has been flushed. */
1077 if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) { 1115 if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) {
1078 load_cr3(swapper_pg_dir); 1116 load_cr3(swapper_pg_dir);
1079 arch_flush_lazy_cpu_mode(); 1117 arch_flush_lazy_cpu_mode();
1080 } 1118 }
@@ -1156,6 +1194,709 @@ void xen_exit_mmap(struct mm_struct *mm)
1156 spin_unlock(&mm->page_table_lock); 1194 spin_unlock(&mm->page_table_lock);
1157} 1195}
1158 1196
1197static __init void xen_pagetable_setup_start(pgd_t *base)
1198{
1199}
1200
1201static __init void xen_pagetable_setup_done(pgd_t *base)
1202{
1203 xen_setup_shared_info();
1204}
1205
1206static void xen_write_cr2(unsigned long cr2)
1207{
1208 percpu_read(xen_vcpu)->arch.cr2 = cr2;
1209}
1210
1211static unsigned long xen_read_cr2(void)
1212{
1213 return percpu_read(xen_vcpu)->arch.cr2;
1214}
1215
1216unsigned long xen_read_cr2_direct(void)
1217{
1218 return percpu_read(xen_vcpu_info.arch.cr2);
1219}
1220
1221static void xen_flush_tlb(void)
1222{
1223 struct mmuext_op *op;
1224 struct multicall_space mcs;
1225
1226 preempt_disable();
1227
1228 mcs = xen_mc_entry(sizeof(*op));
1229
1230 op = mcs.args;
1231 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1232 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1233
1234 xen_mc_issue(PARAVIRT_LAZY_MMU);
1235
1236 preempt_enable();
1237}
1238
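
	xen_flush_tlb() above follows the usual multicall pattern: reserve space for the
	argument in the batch, fill it in, queue the hypercall, then let xen_mc_issue()
	either flush immediately or leave it queued under lazy MMU mode. A toy model of
	that deferral (plain C; the batch array and the printf "hypercall" are stand-ins
	for the real multicall machinery, and the op numbers are made up):

	#include <stdio.h>
	#include <stdbool.h>

	struct op { int cmd; };

	static struct op batch[16];
	static int batch_len;
	static bool lazy_mode;			/* models PARAVIRT_LAZY_MMU being active */

	static struct op *mc_entry(void)
	{
		return &batch[batch_len++];	/* reserve a slot in the batch */
	}

	static void mc_flush(void)
	{
		for (int i = 0; i < batch_len; i++)
			printf("hypercall: op %d\n", batch[i].cmd);	/* fake hypercall */
		batch_len = 0;
	}

	static void mc_issue(void)
	{
		if (!lazy_mode)
			mc_flush();		/* not lazy: submit right away */
		/* lazy: leave it queued; a later flush submits the whole batch */
	}

	static void flush_tlb_local(void)
	{
		struct op *op = mc_entry();
		op->cmd = 1;			/* stand-in for a local-flush command */
		mc_issue();
	}

	int main(void)
	{
		flush_tlb_local();		/* issued immediately */
		lazy_mode = true;
		flush_tlb_local();		/* queued */
		flush_tlb_local();		/* queued */
		mc_flush();			/* both submitted in one batch */
		return 0;
	}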
1239static void xen_flush_tlb_single(unsigned long addr)
1240{
1241 struct mmuext_op *op;
1242 struct multicall_space mcs;
1243
1244 preempt_disable();
1245
1246 mcs = xen_mc_entry(sizeof(*op));
1247 op = mcs.args;
1248 op->cmd = MMUEXT_INVLPG_LOCAL;
1249 op->arg1.linear_addr = addr & PAGE_MASK;
1250 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1251
1252 xen_mc_issue(PARAVIRT_LAZY_MMU);
1253
1254 preempt_enable();
1255}
1256
1257static void xen_flush_tlb_others(const struct cpumask *cpus,
1258 struct mm_struct *mm, unsigned long va)
1259{
1260 struct {
1261 struct mmuext_op op;
1262 DECLARE_BITMAP(mask, NR_CPUS);
1263 } *args;
1264 struct multicall_space mcs;
1265
1266 BUG_ON(cpumask_empty(cpus));
1267 BUG_ON(!mm);
1268
1269 mcs = xen_mc_entry(sizeof(*args));
1270 args = mcs.args;
1271 args->op.arg2.vcpumask = to_cpumask(args->mask);
1272
1273 /* Remove us, and any offline CPUS. */
1274 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1275 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1276 if (unlikely(cpumask_empty(to_cpumask(args->mask))))
1277 goto issue;
1278
1279 if (va == TLB_FLUSH_ALL) {
1280 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1281 } else {
1282 args->op.cmd = MMUEXT_INVLPG_MULTI;
1283 args->op.arg1.linear_addr = va;
1284 }
1285
1286 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1287
1288issue:
1289 xen_mc_issue(PARAVIRT_LAZY_MMU);
1290}
1291
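
	The reworked xen_flush_tlb_others() above no longer copies a cpumask_t by value:
	it builds the vcpu mask in place inside the multicall argument, drops the local
	CPU and any offline CPUs, and skips the flush command entirely when nothing is
	left. A stand-alone sketch of that mask trimming, using a plain integer bitmask
	instead of the kernel's cpumask API (all names and values here are illustrative):

	#include <stdio.h>

	/* trim 'cpus' down to online CPUs other than 'self'; return the result */
	static unsigned int trim_mask(unsigned int cpus, unsigned int online, int self)
	{
		unsigned int mask = cpus & online;	/* drop CPUs that went offline */
		mask &= ~(1u << self);			/* never flush ourselves remotely */
		return mask;
	}

	int main(void)
	{
		unsigned int cpus   = 0x0F;		/* CPUs 0-3 requested */
		unsigned int online = 0x0B;		/* CPU 2 is offline */
		unsigned int mask = trim_mask(cpus, online, 0);

		if (mask == 0)
			printf("nothing to do\n");
		else
			printf("flush CPUs with mask %#x\n", mask);	/* 0xa: CPUs 1 and 3 */
		return 0;
	}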
1292static unsigned long xen_read_cr3(void)
1293{
1294 return percpu_read(xen_cr3);
1295}
1296
1297static void set_current_cr3(void *v)
1298{
1299 percpu_write(xen_current_cr3, (unsigned long)v);
1300}
1301
1302static void __xen_write_cr3(bool kernel, unsigned long cr3)
1303{
1304 struct mmuext_op *op;
1305 struct multicall_space mcs;
1306 unsigned long mfn;
1307
1308 if (cr3)
1309 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1310 else
1311 mfn = 0;
1312
1313 WARN_ON(mfn == 0 && kernel);
1314
1315 mcs = __xen_mc_entry(sizeof(*op));
1316
1317 op = mcs.args;
1318 op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1319 op->arg1.mfn = mfn;
1320
1321 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1322
1323 if (kernel) {
1324 percpu_write(xen_cr3, cr3);
1325
1326 /* Update xen_current_cr3 once the batch has actually
1327 been submitted. */
1328 xen_mc_callback(set_current_cr3, (void *)cr3);
1329 }
1330}
1331
1332static void xen_write_cr3(unsigned long cr3)
1333{
1334 BUG_ON(preemptible());
1335
1336 xen_mc_batch(); /* disables interrupts */
1337
1338	/* Update while interrupts are disabled, so it's atomic with
1339	   respect to IPIs */
1340 percpu_write(xen_cr3, cr3);
1341
1342 __xen_write_cr3(true, cr3);
1343
1344#ifdef CONFIG_X86_64
1345 {
1346 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1347 if (user_pgd)
1348 __xen_write_cr3(false, __pa(user_pgd));
1349 else
1350 __xen_write_cr3(false, 0);
1351 }
1352#endif
1353
1354 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1355}
1356
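
	The split implemented above keeps two per-cpu values: the "logical" cr3 is updated
	as soon as the write is requested, while the "committed" cr3 only changes via a
	callback that runs when the batched hypercalls are flushed. A plain-C model of
	that ordering (not kernel code; the one-slot callback queue is a toy stand-in for
	the multicall batching):

	#include <stdio.h>
	#include <stdint.h>

	static unsigned long logical_cr3;	/* last value we asked for */
	static unsigned long committed_cr3;	/* value the hypervisor has applied */

	static void (*pending_cb)(void *);
	static void *pending_arg;

	static void mc_callback(void (*fn)(void *), void *arg)
	{
		pending_cb = fn;		/* run when the batch is flushed */
		pending_arg = arg;
	}

	static void mc_flush(void)
	{
		/* "submit" the batch, then run the completion callback */
		if (pending_cb)
			pending_cb(pending_arg);
		pending_cb = NULL;
	}

	static void set_committed(void *v)
	{
		committed_cr3 = (unsigned long)(uintptr_t)v;
	}

	static void write_cr3(unsigned long cr3)
	{
		logical_cr3 = cr3;					/* visible at once */
		mc_callback(set_committed, (void *)(uintptr_t)cr3);	/* visible later  */
	}

	int main(void)
	{
		write_cr3(0x1000);
		printf("before flush: logical=%#lx committed=%#lx\n",
		       logical_cr3, committed_cr3);	/* 0x1000 vs 0 */
		mc_flush();
		printf("after  flush: logical=%#lx committed=%#lx\n",
		       logical_cr3, committed_cr3);	/* both 0x1000 */
		return 0;
	}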
1357static int xen_pgd_alloc(struct mm_struct *mm)
1358{
1359 pgd_t *pgd = mm->pgd;
1360 int ret = 0;
1361
1362 BUG_ON(PagePinned(virt_to_page(pgd)));
1363
1364#ifdef CONFIG_X86_64
1365 {
1366 struct page *page = virt_to_page(pgd);
1367 pgd_t *user_pgd;
1368
1369 BUG_ON(page->private != 0);
1370
1371 ret = -ENOMEM;
1372
1373 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1374 page->private = (unsigned long)user_pgd;
1375
1376 if (user_pgd != NULL) {
1377 user_pgd[pgd_index(VSYSCALL_START)] =
1378 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1379 ret = 0;
1380 }
1381
1382 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1383 }
1384#endif
1385
1386 return ret;
1387}
1388
1389static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1390{
1391#ifdef CONFIG_X86_64
1392 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1393
1394 if (user_pgd)
1395 free_page((unsigned long)user_pgd);
1396#endif
1397}
1398
1399#ifdef CONFIG_HIGHPTE
1400static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
1401{
1402 pgprot_t prot = PAGE_KERNEL;
1403
1404 if (PagePinned(page))
1405 prot = PAGE_KERNEL_RO;
1406
1407 if (0 && PageHighMem(page))
1408 printk("mapping highpte %lx type %d prot %s\n",
1409 page_to_pfn(page), type,
1410 (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
1411
1412 return kmap_atomic_prot(page, type, prot);
1413}
1414#endif
1415
1416#ifdef CONFIG_X86_32
1417static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
1418{
1419 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1420 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1421 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1422 pte_val_ma(pte));
1423
1424 return pte;
1425}
1426
1427/* Init-time set_pte while constructing initial pagetables, which
1428 doesn't allow RO pagetable pages to be remapped RW */
1429static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
1430{
1431 pte = mask_rw_pte(ptep, pte);
1432
1433 xen_set_pte(ptep, pte);
1434}
1435#endif
1436
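
	The bit manipulation in mask_rw_pte() above effectively says: the new pte may only
	keep _PAGE_RW if the existing pte already had it. A small worked example of that
	mask, assuming _PAGE_RW is bit 1 (0x2) as on x86; the other pte bit values are
	made up for illustration:

	#include <stdio.h>

	#define _PAGE_RW 0x002UL		/* bit 1 on x86 */

	static unsigned long mask_rw(unsigned long old_pte, unsigned long new_pte)
	{
		/* keep RW in new_pte only if old_pte already had it */
		return ((old_pte & _PAGE_RW) | ~_PAGE_RW) & new_pte;
	}

	int main(void)
	{
		unsigned long ro_old = 0x001;	/* present, read-only */
		unsigned long rw_old = 0x003;	/* present, writable  */
		unsigned long rw_new = 0x063;	/* writable pte being installed */

		printf("over RO pte: %#lx\n", mask_rw(ro_old, rw_new));	/* 0x61: RW stripped */
		printf("over RW pte: %#lx\n", mask_rw(rw_old, rw_new));	/* 0x63: RW kept */
		return 0;
	}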
1437/* Early in boot, while setting up the initial pagetable, assume
1438 everything is pinned. */
1439static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1440{
1441#ifdef CONFIG_FLATMEM
1442 BUG_ON(mem_map); /* should only be used early */
1443#endif
1444 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1445}
1446
1447/* Early release_pte assumes that all pts are pinned, since there's
1448 only init_mm and anything attached to that is pinned. */
1449static void xen_release_pte_init(unsigned long pfn)
1450{
1451 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1452}
1453
1454static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1455{
1456 struct mmuext_op op;
1457 op.cmd = cmd;
1458 op.arg1.mfn = pfn_to_mfn(pfn);
1459 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1460 BUG();
1461}
1462
1463/* This needs to make sure the new pte page is pinned iff it's being
1464 attached to a pinned pagetable. */
1465static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
1466{
1467 struct page *page = pfn_to_page(pfn);
1468
1469 if (PagePinned(virt_to_page(mm->pgd))) {
1470 SetPagePinned(page);
1471
1472 vm_unmap_aliases();
1473 if (!PageHighMem(page)) {
1474 make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
1475 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1476 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1477 } else {
1478 /* make sure there are no stray mappings of
1479 this page */
1480 kmap_flush_unused();
1481 }
1482 }
1483}
1484
1485static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1486{
1487 xen_alloc_ptpage(mm, pfn, PT_PTE);
1488}
1489
1490static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1491{
1492 xen_alloc_ptpage(mm, pfn, PT_PMD);
1493}
1494
1495/* This should never happen until we're OK to use struct page */
1496static void xen_release_ptpage(unsigned long pfn, unsigned level)
1497{
1498 struct page *page = pfn_to_page(pfn);
1499
1500 if (PagePinned(page)) {
1501 if (!PageHighMem(page)) {
1502 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1503 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1504 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1505 }
1506 ClearPagePinned(page);
1507 }
1508}
1509
1510static void xen_release_pte(unsigned long pfn)
1511{
1512 xen_release_ptpage(pfn, PT_PTE);
1513}
1514
1515static void xen_release_pmd(unsigned long pfn)
1516{
1517 xen_release_ptpage(pfn, PT_PMD);
1518}
1519
1520#if PAGETABLE_LEVELS == 4
1521static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1522{
1523 xen_alloc_ptpage(mm, pfn, PT_PUD);
1524}
1525
1526static void xen_release_pud(unsigned long pfn)
1527{
1528 xen_release_ptpage(pfn, PT_PUD);
1529}
1530#endif
1531
1532void __init xen_reserve_top(void)
1533{
1534#ifdef CONFIG_X86_32
1535 unsigned long top = HYPERVISOR_VIRT_START;
1536 struct xen_platform_parameters pp;
1537
1538 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1539 top = pp.virt_start;
1540
1541 reserve_top_address(-top);
1542#endif /* CONFIG_X86_32 */
1543}
1544
1545/*
1546 * Like __va(), but returns the address in the kernel mapping (which is
1547 * all we have until the physical memory mapping has been set up).
1548 */
1549static void *__ka(phys_addr_t paddr)
1550{
1551#ifdef CONFIG_X86_64
1552 return (void *)(paddr + __START_KERNEL_map);
1553#else
1554 return __va(paddr);
1555#endif
1556}
1557
1558/* Convert a machine address to physical address */
1559static unsigned long m2p(phys_addr_t maddr)
1560{
1561 phys_addr_t paddr;
1562
1563 maddr &= PTE_PFN_MASK;
1564 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1565
1566 return paddr;
1567}
1568
1569/* Convert a machine address to kernel virtual */
1570static void *m2v(phys_addr_t maddr)
1571{
1572 return __ka(m2p(maddr));
1573}
1574
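
	m2p()/m2v() above undo the machine-frame translation: mask off the offset and flag
	bits, convert the machine frame number back to a pseudo-physical frame number, and
	shift back into an address. A toy model with a made-up mfn-to-pfn table - the
	PAGE_SHIFT of 12 is the only real constant here, and PAGE_MASK is a simplification
	of the kernel's PTE_PFN_MASK:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

	/* toy mfn -> pfn table; the real code consults the machine-to-phys mapping */
	static unsigned long mfn_to_pfn(unsigned long mfn)
	{
		static const unsigned long table[] = { 7, 3, 9, 0 };
		return table[mfn % 4];
	}

	static unsigned long m2p(unsigned long maddr)
	{
		unsigned long paddr;

		maddr &= PAGE_MASK;					/* keep the frame part */
		paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
		return paddr;
	}

	int main(void)
	{
		unsigned long maddr = (2UL << PAGE_SHIFT) | 0x123;	/* mfn 2, some offset */
		printf("maddr %#lx -> paddr %#lx\n", maddr, m2p(maddr));	/* pfn 9 -> 0x9000 */
		return 0;
	}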
1575static void set_page_prot(void *addr, pgprot_t prot)
1576{
1577 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1578 pte_t pte = pfn_pte(pfn, prot);
1579
1580 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1581 BUG();
1582}
1583
1584static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1585{
1586 unsigned pmdidx, pteidx;
1587 unsigned ident_pte;
1588 unsigned long pfn;
1589
1590 ident_pte = 0;
1591 pfn = 0;
1592 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1593 pte_t *pte_page;
1594
1595 /* Reuse or allocate a page of ptes */
1596 if (pmd_present(pmd[pmdidx]))
1597 pte_page = m2v(pmd[pmdidx].pmd);
1598 else {
1599 /* Check for free pte pages */
1600 if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
1601 break;
1602
1603 pte_page = &level1_ident_pgt[ident_pte];
1604 ident_pte += PTRS_PER_PTE;
1605
1606 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1607 }
1608
1609 /* Install mappings */
1610 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1611 pte_t pte;
1612
1613 if (pfn > max_pfn_mapped)
1614 max_pfn_mapped = pfn;
1615
1616 if (!pte_none(pte_page[pteidx]))
1617 continue;
1618
1619 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1620 pte_page[pteidx] = pte;
1621 }
1622 }
1623
1624 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1625 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1626
1627 set_page_prot(pmd, PAGE_KERNEL_RO);
1628}
1629
1630#ifdef CONFIG_X86_64
1631static void convert_pfn_mfn(void *v)
1632{
1633 pte_t *pte = v;
1634 int i;
1635
1636 /* All levels are converted the same way, so just treat them
1637 as ptes. */
1638 for (i = 0; i < PTRS_PER_PTE; i++)
1639 pte[i] = xen_make_pte(pte[i].pte);
1640}
1641
1642/*
1643 * Set up the initial kernel pagetable.
1644 *
1645 * We can construct this by grafting the Xen provided pagetable into
1646 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1647 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1648 * means that only the kernel has a physical mapping to start with -
1649 * but that's enough to get __va working. We need to fill in the rest
1650 * of the physical mapping once some sort of allocator has been set
1651 * up.
1652 */
1653__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1654 unsigned long max_pfn)
1655{
1656 pud_t *l3;
1657 pmd_t *l2;
1658
1659 /* Zap identity mapping */
1660 init_level4_pgt[0] = __pgd(0);
1661
1662 /* Pre-constructed entries are in pfn, so convert to mfn */
1663 convert_pfn_mfn(init_level4_pgt);
1664 convert_pfn_mfn(level3_ident_pgt);
1665 convert_pfn_mfn(level3_kernel_pgt);
1666
1667 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1668 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1669
1670 memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1671 memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1672
1673 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1674 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1675 memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1676
1677 /* Set up identity map */
1678 xen_map_identity_early(level2_ident_pgt, max_pfn);
1679
1680 /* Make pagetable pieces RO */
1681 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1682 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1683 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1684 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1685 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1686 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1687
1688 /* Pin down new L4 */
1689 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1690 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1691
1692 /* Unpin Xen-provided one */
1693 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1694
1695 /* Switch over */
1696 pgd = init_level4_pgt;
1697
1698 /*
1699 * At this stage there can be no user pgd, and no page
1700 * structure to attach it to, so make sure we just set kernel
1701 * pgd.
1702 */
1703 xen_mc_batch();
1704 __xen_write_cr3(true, __pa(pgd));
1705 xen_mc_issue(PARAVIRT_LAZY_CPU);
1706
1707 reserve_early(__pa(xen_start_info->pt_base),
1708 __pa(xen_start_info->pt_base +
1709 xen_start_info->nr_pt_frames * PAGE_SIZE),
1710 "XEN PAGETABLES");
1711
1712 return pgd;
1713}
1714#else /* !CONFIG_X86_64 */
1715static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
1716
1717__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1718 unsigned long max_pfn)
1719{
1720 pmd_t *kernel_pmd;
1721
1722 init_pg_tables_start = __pa(pgd);
1723 init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
1724 max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);
1725
1726 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
1727 memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
1728
1729 xen_map_identity_early(level2_kernel_pgt, max_pfn);
1730
1731 memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
1732 set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
1733 __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
1734
1735 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1736 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1737 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1738
1739 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1740
1741 xen_write_cr3(__pa(swapper_pg_dir));
1742
1743 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
1744
1745 return swapper_pg_dir;
1746}
1747#endif /* CONFIG_X86_64 */
1748
1749static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
1750{
1751 pte_t pte;
1752
1753 phys >>= PAGE_SHIFT;
1754
1755 switch (idx) {
1756 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
1757#ifdef CONFIG_X86_F00F_BUG
1758 case FIX_F00F_IDT:
1759#endif
1760#ifdef CONFIG_X86_32
1761 case FIX_WP_TEST:
1762 case FIX_VDSO:
1763# ifdef CONFIG_HIGHMEM
1764 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
1765# endif
1766#else
1767 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
1768#endif
1769#ifdef CONFIG_X86_LOCAL_APIC
1770 case FIX_APIC_BASE: /* maps dummy local APIC */
1771#endif
1772 pte = pfn_pte(phys, prot);
1773 break;
1774
1775 default:
1776 pte = mfn_pte(phys, prot);
1777 break;
1778 }
1779
1780 __native_set_fixmap(idx, pte);
1781
1782#ifdef CONFIG_X86_64
1783 /* Replicate changes to map the vsyscall page into the user
1784 pagetable vsyscall mapping. */
1785 if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
1786 unsigned long vaddr = __fix_to_virt(idx);
1787 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
1788 }
1789#endif
1790}
1791
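
	xen_set_fixmap() above boils down to one decision: a handful of fixmap slots are
	handed real pseudo-physical frames (or dummies) and go through pfn_pte(), while
	everything else is assumed to carry a machine frame and goes through mfn_pte().
	A stripped-down sketch of that dispatch - the slot names, helper bodies and bit
	values below are stand-ins, not the kernel API:

	#include <stdio.h>

	/* stand-in fixmap slots, not the kernel's enum */
	enum fixmap_slot { SLOT_BTMAP, SLOT_KMAP, SLOT_APIC, SLOT_OTHER };

	/* toy pte builders: just tag which translation path was taken */
	static unsigned long pfn_pte_toy(unsigned long pfn) { return (pfn << 12) | 0x1; }
	static unsigned long mfn_pte_toy(unsigned long mfn) { return (mfn << 12) | 0x3; }

	static unsigned long make_fixmap_pte(enum fixmap_slot idx, unsigned long frame)
	{
		switch (idx) {
		case SLOT_BTMAP:
		case SLOT_KMAP:
		case SLOT_APIC:
			return pfn_pte_toy(frame);	/* frame is pseudo-physical */
		default:
			return mfn_pte_toy(frame);	/* frame is a machine frame */
		}
	}

	int main(void)
	{
		printf("btmap pte: %#lx\n", make_fixmap_pte(SLOT_BTMAP, 0x10));
		printf("other pte: %#lx\n", make_fixmap_pte(SLOT_OTHER, 0x10));
		return 0;
	}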
1792__init void xen_post_allocator_init(void)
1793{
1794 pv_mmu_ops.set_pte = xen_set_pte;
1795 pv_mmu_ops.set_pmd = xen_set_pmd;
1796 pv_mmu_ops.set_pud = xen_set_pud;
1797#if PAGETABLE_LEVELS == 4
1798 pv_mmu_ops.set_pgd = xen_set_pgd;
1799#endif
1800
1801 /* This will work as long as patching hasn't happened yet
1802 (which it hasn't) */
1803 pv_mmu_ops.alloc_pte = xen_alloc_pte;
1804 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
1805 pv_mmu_ops.release_pte = xen_release_pte;
1806 pv_mmu_ops.release_pmd = xen_release_pmd;
1807#if PAGETABLE_LEVELS == 4
1808 pv_mmu_ops.alloc_pud = xen_alloc_pud;
1809 pv_mmu_ops.release_pud = xen_release_pud;
1810#endif
1811
1812#ifdef CONFIG_X86_64
1813 SetPagePinned(virt_to_page(level3_user_vsyscall));
1814#endif
1815 xen_mark_init_mm_pinned();
1816}
1817
1818
1819const struct pv_mmu_ops xen_mmu_ops __initdata = {
1820 .pagetable_setup_start = xen_pagetable_setup_start,
1821 .pagetable_setup_done = xen_pagetable_setup_done,
1822
1823 .read_cr2 = xen_read_cr2,
1824 .write_cr2 = xen_write_cr2,
1825
1826 .read_cr3 = xen_read_cr3,
1827 .write_cr3 = xen_write_cr3,
1828
1829 .flush_tlb_user = xen_flush_tlb,
1830 .flush_tlb_kernel = xen_flush_tlb,
1831 .flush_tlb_single = xen_flush_tlb_single,
1832 .flush_tlb_others = xen_flush_tlb_others,
1833
1834 .pte_update = paravirt_nop,
1835 .pte_update_defer = paravirt_nop,
1836
1837 .pgd_alloc = xen_pgd_alloc,
1838 .pgd_free = xen_pgd_free,
1839
1840 .alloc_pte = xen_alloc_pte_init,
1841 .release_pte = xen_release_pte_init,
1842 .alloc_pmd = xen_alloc_pte_init,
1843 .alloc_pmd_clone = paravirt_nop,
1844 .release_pmd = xen_release_pte_init,
1845
1846#ifdef CONFIG_HIGHPTE
1847 .kmap_atomic_pte = xen_kmap_atomic_pte,
1848#endif
1849
1850#ifdef CONFIG_X86_64
1851 .set_pte = xen_set_pte,
1852#else
1853 .set_pte = xen_set_pte_init,
1854#endif
1855 .set_pte_at = xen_set_pte_at,
1856 .set_pmd = xen_set_pmd_hyper,
1857
1858 .ptep_modify_prot_start = __ptep_modify_prot_start,
1859 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
1860
1861 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
1862 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
1863
1864 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
1865 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
1866
1867#ifdef CONFIG_X86_PAE
1868 .set_pte_atomic = xen_set_pte_atomic,
1869 .set_pte_present = xen_set_pte_at,
1870 .pte_clear = xen_pte_clear,
1871 .pmd_clear = xen_pmd_clear,
1872#endif /* CONFIG_X86_PAE */
1873 .set_pud = xen_set_pud_hyper,
1874
1875 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
1876 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
1877
1878#if PAGETABLE_LEVELS == 4
1879 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
1880 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
1881 .set_pgd = xen_set_pgd_hyper,
1882
1883 .alloc_pud = xen_alloc_pte_init,
1884 .release_pud = xen_release_pte_init,
1885#endif /* PAGETABLE_LEVELS == 4 */
1886
1887 .activate_mm = xen_activate_mm,
1888 .dup_mmap = xen_dup_mmap,
1889 .exit_mmap = xen_exit_mmap,
1890
1891 .lazy_mode = {
1892 .enter = paravirt_enter_lazy_mmu,
1893 .leave = xen_leave_lazy,
1894 },
1895
1896 .set_fixmap = xen_set_fixmap,
1897};
1898
1899
1159#ifdef CONFIG_XEN_DEBUG_FS 1900#ifdef CONFIG_XEN_DEBUG_FS
1160 1901
1161static struct dentry *d_mmu_debug; 1902static struct dentry *d_mmu_debug;
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index 98d71659da5a..24d1b44a337d 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -54,4 +54,7 @@ pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t
54void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, 54void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
55 pte_t *ptep, pte_t pte); 55 pte_t *ptep, pte_t pte);
56 56
57unsigned long xen_read_cr2_direct(void);
58
59extern const struct pv_mmu_ops xen_mmu_ops;
57#endif /* _XEN_MMU_H */ 60#endif /* _XEN_MMU_H */
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index fa3e10725d98..9e565da5d1f7 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -41,7 +41,7 @@ static inline void xen_mc_issue(unsigned mode)
41 xen_mc_flush(); 41 xen_mc_flush();
42 42
43 /* restore flags saved in xen_mc_batch */ 43 /* restore flags saved in xen_mc_batch */
44 local_irq_restore(x86_read_percpu(xen_mc_irq_flags)); 44 local_irq_restore(percpu_read(xen_mc_irq_flags));
45} 45}
46 46
47/* Set up a callback to be called when the current batch is flushed */ 47/* Set up a callback to be called when the current batch is flushed */
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index c44e2069c7c7..035582ae815d 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -50,11 +50,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
50 */ 50 */
51static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) 51static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
52{ 52{
53#ifdef CONFIG_X86_32 53 inc_irq_stat(irq_resched_count);
54 __get_cpu_var(irq_stat).irq_resched_count++;
55#else
56 add_pda(irq_resched_count, 1);
57#endif
58 54
59 return IRQ_HANDLED; 55 return IRQ_HANDLED;
60} 56}
@@ -78,7 +74,7 @@ static __cpuinit void cpu_bringup(void)
78 xen_setup_cpu_clockevents(); 74 xen_setup_cpu_clockevents();
79 75
80 cpu_set(cpu, cpu_online_map); 76 cpu_set(cpu, cpu_online_map);
81 x86_write_percpu(cpu_state, CPU_ONLINE); 77 percpu_write(cpu_state, CPU_ONLINE);
82 wmb(); 78 wmb();
83 79
84 /* We can take interrupts now: we're officially "up". */ 80 /* We can take interrupts now: we're officially "up". */
@@ -174,7 +170,7 @@ static void __init xen_smp_prepare_boot_cpu(void)
174 170
175 /* We've switched to the "real" per-cpu gdt, so make sure the 171 /* We've switched to the "real" per-cpu gdt, so make sure the
176 old memory can be recycled */ 172 old memory can be recycled */
177 make_lowmem_page_readwrite(&per_cpu_var(gdt_page)); 173 make_lowmem_page_readwrite(xen_initial_gdt);
178 174
179 xen_setup_vcpu_info_placement(); 175 xen_setup_vcpu_info_placement();
180} 176}
@@ -239,6 +235,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
239 ctxt->user_regs.ss = __KERNEL_DS; 235 ctxt->user_regs.ss = __KERNEL_DS;
240#ifdef CONFIG_X86_32 236#ifdef CONFIG_X86_32
241 ctxt->user_regs.fs = __KERNEL_PERCPU; 237 ctxt->user_regs.fs = __KERNEL_PERCPU;
238#else
239 ctxt->gs_base_kernel = per_cpu_offset(cpu);
242#endif 240#endif
243 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle; 241 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
244 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */ 242 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
@@ -283,23 +281,14 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
283 struct task_struct *idle = idle_task(cpu); 281 struct task_struct *idle = idle_task(cpu);
284 int rc; 282 int rc;
285 283
286#ifdef CONFIG_X86_64
287 /* Allocate node local memory for AP pdas */
288 WARN_ON(cpu == 0);
289 if (cpu > 0) {
290 rc = get_local_pda(cpu);
291 if (rc)
292 return rc;
293 }
294#endif
295
296#ifdef CONFIG_X86_32
297 init_gdt(cpu);
298 per_cpu(current_task, cpu) = idle; 284 per_cpu(current_task, cpu) = idle;
285#ifdef CONFIG_X86_32
299 irq_ctx_init(cpu); 286 irq_ctx_init(cpu);
300#else 287#else
301 cpu_pda(cpu)->pcurrent = idle;
302 clear_tsk_thread_flag(idle, TIF_FORK); 288 clear_tsk_thread_flag(idle, TIF_FORK);
289 per_cpu(kernel_stack, cpu) =
290 (unsigned long)task_stack_page(idle) -
291 KERNEL_STACK_OFFSET + THREAD_SIZE;
303#endif 292#endif
304 xen_setup_timer(cpu); 293 xen_setup_timer(cpu);
305 xen_init_lock_cpu(cpu); 294 xen_init_lock_cpu(cpu);
@@ -445,11 +434,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
445{ 434{
446 irq_enter(); 435 irq_enter();
447 generic_smp_call_function_interrupt(); 436 generic_smp_call_function_interrupt();
448#ifdef CONFIG_X86_32 437 inc_irq_stat(irq_call_count);
449 __get_cpu_var(irq_stat).irq_call_count++;
450#else
451 add_pda(irq_call_count, 1);
452#endif
453 irq_exit(); 438 irq_exit();
454 439
455 return IRQ_HANDLED; 440 return IRQ_HANDLED;
@@ -459,11 +444,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
459{ 444{
460 irq_enter(); 445 irq_enter();
461 generic_smp_call_function_single_interrupt(); 446 generic_smp_call_function_single_interrupt();
462#ifdef CONFIG_X86_32 447 inc_irq_stat(irq_call_count);
463 __get_cpu_var(irq_stat).irq_call_count++;
464#else
465 add_pda(irq_call_count, 1);
466#endif
467 irq_exit(); 448 irq_exit();
468 449
469 return IRQ_HANDLED; 450 return IRQ_HANDLED;
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 212ffe012b76..95be7b434724 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -6,6 +6,7 @@
6 6
7#include <asm/xen/hypercall.h> 7#include <asm/xen/hypercall.h>
8#include <asm/xen/page.h> 8#include <asm/xen/page.h>
9#include <asm/fixmap.h>
9 10
10#include "xen-ops.h" 11#include "xen-ops.h"
11#include "mmu.h" 12#include "mmu.h"
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
new file mode 100644
index 000000000000..4c6f96799131
--- /dev/null
+++ b/arch/x86/xen/xen-asm.S
@@ -0,0 +1,140 @@
1/*
2 Asm versions of Xen pv-ops, suitable for either direct use or inlining.
3 The inline versions are the same as the direct-use versions, with the
4 pre- and post-amble chopped off.
5
6 This code is encoded for size rather than absolute efficiency,
7 with a view to being able to inline as much as possible.
8
9 We only bother with direct forms (ie, vcpu in percpu data) of
10 the operations here; the indirect forms are better handled in
11 C, since they're generally too large to inline anyway.
12 */
13
14#include <asm/asm-offsets.h>
15#include <asm/percpu.h>
16#include <asm/processor-flags.h>
17
18#include "xen-asm.h"
19
20/*
21 Enable events. This clears the event mask and tests the pending
 22	event status with a single 'and' operation. If there are pending
23 events, then enter the hypervisor to get them handled.
24 */
25ENTRY(xen_irq_enable_direct)
26 /* Unmask events */
27 movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
28
29 /* Preempt here doesn't matter because that will deal with
30 any pending interrupts. The pending check may end up being
31 run on the wrong CPU, but that doesn't hurt. */
32
33 /* Test for pending */
34 testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
35 jz 1f
36
372: call check_events
381:
39ENDPATCH(xen_irq_enable_direct)
40 ret
41 ENDPROC(xen_irq_enable_direct)
42 RELOC(xen_irq_enable_direct, 2b+1)
43
44
45/*
46 Disabling events is simply a matter of making the event mask
47 non-zero.
48 */
49ENTRY(xen_irq_disable_direct)
50 movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
51ENDPATCH(xen_irq_disable_direct)
52 ret
53 ENDPROC(xen_irq_disable_direct)
54 RELOC(xen_irq_disable_direct, 0)
55
56/*
57 (xen_)save_fl is used to get the current interrupt enable status.
58 Callers expect the status to be in X86_EFLAGS_IF, and other bits
59 may be set in the return value. We take advantage of this by
60 making sure that X86_EFLAGS_IF has the right value (and other bits
61 in that byte are 0), but other bits in the return value are
62 undefined. We need to toggle the state of the bit, because
63 Xen and x86 use opposite senses (mask vs enable).
64 */
65ENTRY(xen_save_fl_direct)
66 testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
67 setz %ah
68 addb %ah,%ah
69ENDPATCH(xen_save_fl_direct)
70 ret
71 ENDPROC(xen_save_fl_direct)
72 RELOC(xen_save_fl_direct, 0)
73
74
75/*
 76 In principle the caller should be passing us a value returned
 77 from xen_save_fl_direct, but for robustness' sake we test only
78 the X86_EFLAGS_IF flag rather than the whole byte. After
79 setting the interrupt mask state, it checks for unmasked
80 pending events and enters the hypervisor to get them delivered
81 if so.
82 */
83ENTRY(xen_restore_fl_direct)
84#ifdef CONFIG_X86_64
85 testw $X86_EFLAGS_IF, %di
86#else
87 testb $X86_EFLAGS_IF>>8, %ah
88#endif
89 setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
90 /* Preempt here doesn't matter because that will deal with
91 any pending interrupts. The pending check may end up being
92 run on the wrong CPU, but that doesn't hurt. */
93
94 /* check for unmasked and pending */
95 cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
96 jz 1f
972: call check_events
981:
99ENDPATCH(xen_restore_fl_direct)
100 ret
101 ENDPROC(xen_restore_fl_direct)
102 RELOC(xen_restore_fl_direct, 2b+1)
103
104
105/*
106 Force an event check by making a hypercall,
107 but preserve regs before making the call.
108 */
109check_events:
110#ifdef CONFIG_X86_32
111 push %eax
112 push %ecx
113 push %edx
114 call xen_force_evtchn_callback
115 pop %edx
116 pop %ecx
117 pop %eax
118#else
119 push %rax
120 push %rcx
121 push %rdx
122 push %rsi
123 push %rdi
124 push %r8
125 push %r9
126 push %r10
127 push %r11
128 call xen_force_evtchn_callback
129 pop %r11
130 pop %r10
131 pop %r9
132 pop %r8
133 pop %rdi
134 pop %rsi
135 pop %rdx
136 pop %rcx
137 pop %rax
138#endif
139 ret
140
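The comment blocks in this new file describe the flag convention the direct stubs implement: Xen's per-vcpu event mask and the x86 EFLAGS.IF bit have opposite senses, so save_fl/restore_fl must invert while translating. A C-level sketch of that mapping, assuming only that X86_EFLAGS_IF is 0x200 and that the mask byte is the xen_vcpu_info field the assembly touches (both function names are invented for illustration):

#define X86_EFLAGS_IF	0x00000200

/* What xen_save_fl_direct computes: IF set <=> events not masked. */
static unsigned long xen_save_fl_sketch(unsigned char mask)
{
	return mask ? 0 : X86_EFLAGS_IF;
}

/*
 * What xen_restore_fl_direct stores: mask set <=> IF clear.  The real
 * stub then falls through to check_events if it has just unmasked and
 * an event is already pending.
 */
static unsigned char xen_restore_fl_sketch(unsigned long flags)
{
	return (flags & X86_EFLAGS_IF) ? 0 : 1;
}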
diff --git a/arch/x86/xen/xen-asm.h b/arch/x86/xen/xen-asm.h
new file mode 100644
index 000000000000..465276467a47
--- /dev/null
+++ b/arch/x86/xen/xen-asm.h
@@ -0,0 +1,12 @@
1#ifndef _XEN_XEN_ASM_H
2#define _XEN_XEN_ASM_H
3
4#include <linux/linkage.h>
5
6#define RELOC(x, v) .globl x##_reloc; x##_reloc=v
7#define ENDPATCH(x) .globl x##_end; x##_end=.
8
9/* Pseudo-flag used for virtual NMI, which we don't implement yet */
10#define XEN_EFLAGS_NMI 0x80000000
11
12#endif
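RELOC() and ENDPATCH() exist so that each stub exports two extra symbols: one marking where the stub ends and one recording the offset, if any, that needs relocating when the stub is copied inline by the pv-op patching code (which lives outside this diff). A worked expansion, using the macro definitions above verbatim:

/* Copied from xen-asm.h above, shown only to spell out the expansion. */
#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)	.globl x##_end; x##_end=.

/*
 * In xen-asm.S, RELOC(xen_irq_disable_direct, 0) therefore emits
 *	.globl xen_irq_disable_direct_reloc; xen_irq_disable_direct_reloc=0
 * and ENDPATCH(xen_irq_disable_direct) emits
 *	.globl xen_irq_disable_direct_end; xen_irq_disable_direct_end=.
 */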
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 42786f59d9c0..082d173caaf3 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -11,101 +11,28 @@
11 generally too large to inline anyway. 11 generally too large to inline anyway.
12 */ 12 */
13 13
14#include <linux/linkage.h> 14//#include <asm/asm-offsets.h>
15
16#include <asm/asm-offsets.h>
17#include <asm/thread_info.h> 15#include <asm/thread_info.h>
18#include <asm/percpu.h>
19#include <asm/processor-flags.h> 16#include <asm/processor-flags.h>
20#include <asm/segment.h> 17#include <asm/segment.h>
21 18
22#include <xen/interface/xen.h> 19#include <xen/interface/xen.h>
23 20
24#define RELOC(x, v) .globl x##_reloc; x##_reloc=v 21#include "xen-asm.h"
25#define ENDPATCH(x) .globl x##_end; x##_end=.
26
27/* Pseudo-flag used for virtual NMI, which we don't implement yet */
28#define XEN_EFLAGS_NMI 0x80000000
29
30/*
31 Enable events. This clears the event mask and tests the pending
32 event status with one and operation. If there are pending
33 events, then enter the hypervisor to get them handled.
34 */
35ENTRY(xen_irq_enable_direct)
36 /* Unmask events */
37 movb $0, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
38
39 /* Preempt here doesn't matter because that will deal with
40 any pending interrupts. The pending check may end up being
41 run on the wrong CPU, but that doesn't hurt. */
42
43 /* Test for pending */
44 testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
45 jz 1f
46
472: call check_events
481:
49ENDPATCH(xen_irq_enable_direct)
50 ret
51 ENDPROC(xen_irq_enable_direct)
52 RELOC(xen_irq_enable_direct, 2b+1)
53
54 22
55/* 23/*
56 Disabling events is simply a matter of making the event mask 24 Force an event check by making a hypercall,
57 non-zero. 25 but preserve regs before making the call.
58 */
59ENTRY(xen_irq_disable_direct)
60 movb $1, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
61ENDPATCH(xen_irq_disable_direct)
62 ret
63 ENDPROC(xen_irq_disable_direct)
64 RELOC(xen_irq_disable_direct, 0)
65
66/*
67 (xen_)save_fl is used to get the current interrupt enable status.
68 Callers expect the status to be in X86_EFLAGS_IF, and other bits
69 may be set in the return value. We take advantage of this by
70 making sure that X86_EFLAGS_IF has the right value (and other bits
71 in that byte are 0), but other bits in the return value are
72 undefined. We need to toggle the state of the bit, because
73 Xen and x86 use opposite senses (mask vs enable).
74 */
75ENTRY(xen_save_fl_direct)
76 testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
77 setz %ah
78 addb %ah,%ah
79ENDPATCH(xen_save_fl_direct)
80 ret
81 ENDPROC(xen_save_fl_direct)
82 RELOC(xen_save_fl_direct, 0)
83
84
85/*
86 In principle the caller should be passing us a value return
87 from xen_save_fl_direct, but for robustness sake we test only
88 the X86_EFLAGS_IF flag rather than the whole byte. After
89 setting the interrupt mask state, it checks for unmasked
90 pending events and enters the hypervisor to get them delivered
91 if so.
92 */ 26 */
93ENTRY(xen_restore_fl_direct) 27check_events:
94 testb $X86_EFLAGS_IF>>8, %ah 28 push %eax
95 setz PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask 29 push %ecx
96 /* Preempt here doesn't matter because that will deal with 30 push %edx
97 any pending interrupts. The pending check may end up being 31 call xen_force_evtchn_callback
98 run on the wrong CPU, but that doesn't hurt. */ 32 pop %edx
99 33 pop %ecx
100 /* check for unmasked and pending */ 34 pop %eax
101 cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
102 jz 1f
1032: call check_events
1041:
105ENDPATCH(xen_restore_fl_direct)
106 ret 35 ret
107 ENDPROC(xen_restore_fl_direct)
108 RELOC(xen_restore_fl_direct, 2b+1)
109 36
110/* 37/*
111 We can't use sysexit directly, because we're not running in ring0. 38 We can't use sysexit directly, because we're not running in ring0.
@@ -289,17 +216,3 @@ ENTRY(xen_iret_crit_fixup)
289 lea 4(%edi),%esp /* point esp to new frame */ 216 lea 4(%edi),%esp /* point esp to new frame */
2902: jmp xen_do_upcall 2172: jmp xen_do_upcall
291 218
292
293/*
294 Force an event check by making a hypercall,
295 but preserve regs before making the call.
296 */
297check_events:
298 push %eax
299 push %ecx
300 push %edx
301 call xen_force_evtchn_callback
302 pop %edx
303 pop %ecx
304 pop %eax
305 ret
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 05794c566e87..d205a283efe0 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -11,143 +11,14 @@
11 generally too large to inline anyway. 11 generally too large to inline anyway.
12 */ 12 */
13 13
14#include <linux/linkage.h>
15
16#include <asm/asm-offsets.h>
17#include <asm/processor-flags.h>
18#include <asm/errno.h> 14#include <asm/errno.h>
15#include <asm/percpu.h>
16#include <asm/processor-flags.h>
19#include <asm/segment.h> 17#include <asm/segment.h>
20 18
21#include <xen/interface/xen.h> 19#include <xen/interface/xen.h>
22 20
23#define RELOC(x, v) .globl x##_reloc; x##_reloc=v 21#include "xen-asm.h"
24#define ENDPATCH(x) .globl x##_end; x##_end=.
25
26/* Pseudo-flag used for virtual NMI, which we don't implement yet */
27#define XEN_EFLAGS_NMI 0x80000000
28
29#if 1
30/*
31 x86-64 does not yet support direct access to percpu variables
32 via a segment override, so we just need to make sure this code
33 never gets used
34 */
35#define BUG ud2a
36#define PER_CPU_VAR(var, off) 0xdeadbeef
37#endif
38
39/*
40 Enable events. This clears the event mask and tests the pending
41 event status with one and operation. If there are pending
42 events, then enter the hypervisor to get them handled.
43 */
44ENTRY(xen_irq_enable_direct)
45 BUG
46
47 /* Unmask events */
48 movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
49
50 /* Preempt here doesn't matter because that will deal with
51 any pending interrupts. The pending check may end up being
52 run on the wrong CPU, but that doesn't hurt. */
53
54 /* Test for pending */
55 testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
56 jz 1f
57
582: call check_events
591:
60ENDPATCH(xen_irq_enable_direct)
61 ret
62 ENDPROC(xen_irq_enable_direct)
63 RELOC(xen_irq_enable_direct, 2b+1)
64
65/*
66 Disabling events is simply a matter of making the event mask
67 non-zero.
68 */
69ENTRY(xen_irq_disable_direct)
70 BUG
71
72 movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
73ENDPATCH(xen_irq_disable_direct)
74 ret
75 ENDPROC(xen_irq_disable_direct)
76 RELOC(xen_irq_disable_direct, 0)
77
78/*
79 (xen_)save_fl is used to get the current interrupt enable status.
80 Callers expect the status to be in X86_EFLAGS_IF, and other bits
81 may be set in the return value. We take advantage of this by
82 making sure that X86_EFLAGS_IF has the right value (and other bits
83 in that byte are 0), but other bits in the return value are
84 undefined. We need to toggle the state of the bit, because
85 Xen and x86 use opposite senses (mask vs enable).
86 */
87ENTRY(xen_save_fl_direct)
88 BUG
89
90 testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
91 setz %ah
92 addb %ah,%ah
93ENDPATCH(xen_save_fl_direct)
94 ret
95 ENDPROC(xen_save_fl_direct)
96 RELOC(xen_save_fl_direct, 0)
97
98/*
99 In principle the caller should be passing us a value return
100 from xen_save_fl_direct, but for robustness sake we test only
101 the X86_EFLAGS_IF flag rather than the whole byte. After
102 setting the interrupt mask state, it checks for unmasked
103 pending events and enters the hypervisor to get them delivered
104 if so.
105 */
106ENTRY(xen_restore_fl_direct)
107 BUG
108
109 testb $X86_EFLAGS_IF>>8, %ah
110 setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
111 /* Preempt here doesn't matter because that will deal with
112 any pending interrupts. The pending check may end up being
113 run on the wrong CPU, but that doesn't hurt. */
114
115 /* check for unmasked and pending */
116 cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
117 jz 1f
1182: call check_events
1191:
120ENDPATCH(xen_restore_fl_direct)
121 ret
122 ENDPROC(xen_restore_fl_direct)
123 RELOC(xen_restore_fl_direct, 2b+1)
124
125
126/*
127 Force an event check by making a hypercall,
128 but preserve regs before making the call.
129 */
130check_events:
131 push %rax
132 push %rcx
133 push %rdx
134 push %rsi
135 push %rdi
136 push %r8
137 push %r9
138 push %r10
139 push %r11
140 call xen_force_evtchn_callback
141 pop %r11
142 pop %r10
143 pop %r9
144 pop %r8
145 pop %rdi
146 pop %rsi
147 pop %rdx
148 pop %rcx
149 pop %rax
150 ret
151 22
152ENTRY(xen_adjust_exception_frame) 23ENTRY(xen_adjust_exception_frame)
153 mov 8+0(%rsp),%rcx 24 mov 8+0(%rsp),%rcx
@@ -195,11 +66,11 @@ RELOC(xen_sysexit, 1b+1)
195ENTRY(xen_sysret64) 66ENTRY(xen_sysret64)
196 /* We're already on the usermode stack at this point, but still 67 /* We're already on the usermode stack at this point, but still
197 with the kernel gs, so we can easily switch back */ 68 with the kernel gs, so we can easily switch back */
198 movq %rsp, %gs:pda_oldrsp 69 movq %rsp, PER_CPU_VAR(old_rsp)
199 movq %gs:pda_kernelstack,%rsp 70 movq PER_CPU_VAR(kernel_stack),%rsp
200 71
201 pushq $__USER_DS 72 pushq $__USER_DS
202 pushq %gs:pda_oldrsp 73 pushq PER_CPU_VAR(old_rsp)
203 pushq %r11 74 pushq %r11
204 pushq $__USER_CS 75 pushq $__USER_CS
205 pushq %rcx 76 pushq %rcx
@@ -212,11 +83,11 @@ RELOC(xen_sysret64, 1b+1)
212ENTRY(xen_sysret32) 83ENTRY(xen_sysret32)
213 /* We're already on the usermode stack at this point, but still 84 /* We're already on the usermode stack at this point, but still
214 with the kernel gs, so we can easily switch back */ 85 with the kernel gs, so we can easily switch back */
215 movq %rsp, %gs:pda_oldrsp 86 movq %rsp, PER_CPU_VAR(old_rsp)
216 movq %gs:pda_kernelstack, %rsp 87 movq PER_CPU_VAR(kernel_stack), %rsp
217 88
218 pushq $__USER32_DS 89 pushq $__USER32_DS
219 pushq %gs:pda_oldrsp 90 pushq PER_CPU_VAR(old_rsp)
220 pushq %r11 91 pushq %r11
221 pushq $__USER32_CS 92 pushq $__USER32_CS
222 pushq %rcx 93 pushq %rcx
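The xen_sysret64/xen_sysret32 hunks replace the %gs:pda_oldrsp and %gs:pda_kernelstack references with PER_CPU_VAR(old_rsp) and PER_CPU_VAR(kernel_stack). Those assembly references assume matching C-side per-cpu variables; a minimal sketch of that side, with the defining files assumed rather than shown by this diff:

#include <linux/percpu.h>

/* Illustrative only; the real definitions live elsewhere in the tree. */
DEFINE_PER_CPU(unsigned long, old_rsp);		/* user %rsp stashed at syscall entry */
DEFINE_PER_CPU(unsigned long, kernel_stack);	/* stack pointer to load for the kernel */

With these in place, PER_CPU_VAR(old_rsp) in the assembly and per_cpu(old_rsp, cpu) in C name the same per-cpu slot, which is what lets the PDA fields go away.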
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index c1f8faf0a2c5..2f5ef2632ea2 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -10,9 +10,12 @@
10extern const char xen_hypervisor_callback[]; 10extern const char xen_hypervisor_callback[];
11extern const char xen_failsafe_callback[]; 11extern const char xen_failsafe_callback[];
12 12
13extern void *xen_initial_gdt;
14
13struct trap_info; 15struct trap_info;
14void xen_copy_trap_info(struct trap_info *traps); 16void xen_copy_trap_info(struct trap_info *traps);
15 17
18DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
16DECLARE_PER_CPU(unsigned long, xen_cr3); 19DECLARE_PER_CPU(unsigned long, xen_cr3);
17DECLARE_PER_CPU(unsigned long, xen_current_cr3); 20DECLARE_PER_CPU(unsigned long, xen_current_cr3);
18 21
@@ -22,6 +25,13 @@ extern struct shared_info *HYPERVISOR_shared_info;
22 25
23void xen_setup_mfn_list_list(void); 26void xen_setup_mfn_list_list(void);
24void xen_setup_shared_info(void); 27void xen_setup_shared_info(void);
28void xen_setup_machphys_mapping(void);
29pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
30void xen_ident_map_ISA(void);
31void xen_reserve_top(void);
32
33void xen_leave_lazy(void);
34void xen_post_allocator_init(void);
25 35
26char * __init xen_memory_setup(void); 36char * __init xen_memory_setup(void);
27void __init xen_arch_setup(void); 37void __init xen_arch_setup(void);
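The new DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info) line is what lets C code and the assembly stubs share one object: the header declares the per-cpu symbol, some xen/*.c file defines it (outside this hunk), and xen-asm.S addresses the same storage through PER_CPU_VAR(xen_vcpu_info) plus the asm-offsets constants XEN_vcpu_info_mask and XEN_vcpu_info_pending. A sketch of the C-side view, with the accessor function invented for illustration:

#include <linux/percpu.h>
#include <xen/interface/xen.h>	/* struct vcpu_info */

DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);

/*
 * Illustrative only: this writes the same byte that the stubs poke as
 * PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask.
 */
static void xen_mask_events_on(int cpu)
{
	per_cpu(xen_vcpu_info, cpu).evtchn_upcall_mask = 1;
}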